diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 0000000..4fc2a02
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,20 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the Vapor open source project
+//
+// Copyright (c) 2017-2021 Vapor project authors
+// Licensed under MIT
+//
+// See LICENSE for license information
+//
+// SPDX-License-Identifier: MIT
+//
+//===----------------------------------------------------------------------===//
+
+This product contains a selection from the `OrderedCollection` module from
+Swift Collections.
+
+  * LICENSE (Apache License 2.0):
+    * https://swift.org/LICENSE.txt
+  * HOMEPAGE:
+    * https://github.com/apple/swift-collections
\ No newline at end of file
diff --git a/Package.swift b/Package.swift
index a8ef7d8..5ff0744 100644
--- a/Package.swift
+++ b/Package.swift
@@ -10,7 +10,7 @@ let package = Package(
         .library(name: "MultipartKit", targets: ["MultipartKit"]),
     ],
     dependencies: [
-        .package(url: "https://github.com/apple/swift-nio.git", from: "2.2.0"),
+        .package(url: "https://github.com/apple/swift-nio.git", from: "2.2.0")
     ],
     targets: [
         .target(name: "MultipartKit", dependencies: [
diff --git a/Sources/MultipartKit/BasicCodingKey.swift b/Sources/MultipartKit/BasicCodingKey.swift
index de63e43..a72ec59 100644
--- a/Sources/MultipartKit/BasicCodingKey.swift
+++ b/Sources/MultipartKit/BasicCodingKey.swift
@@ -29,4 +29,3 @@ internal enum BasicCodingKey: CodingKey {
         self = .index(intValue)
     }
 }
-
diff --git a/Sources/MultipartKit/Deprecated/MultipartError.swift b/Sources/MultipartKit/Deprecated/MultipartError.swift
new file mode 100644
index 0000000..eae2175
--- /dev/null
+++ b/Sources/MultipartKit/Deprecated/MultipartError.swift
@@ -0,0 +1,11 @@
+@available(*, deprecated)
+public enum MultipartError: Error, CustomStringConvertible {
+    case invalidFormat
+    case convertibleType(Any.Type)
+    case convertiblePart(Any.Type, MultipartPart)
+    case nesting
+    case missingPart(String)
+    case missingFilename
+
+    public var description: String { "" }
+}
diff --git a/Sources/MultipartKit/FormDataDecoder.swift b/Sources/MultipartKit/FormDataDecoder.swift
index efeec31..96f7349 100644
--- a/Sources/MultipartKit/FormDataDecoder.swift
+++ b/Sources/MultipartKit/FormDataDecoder.swift
@@ -4,9 +4,28 @@
 ///
 /// Seealso `MultipartParser` for more information about the `multipart` encoding.
 public struct FormDataDecoder {
+    /// Maximum nesting depth to allow when decoding the input.
+    /// - 1 corresponds to a single value
+    /// - 2 corresponds to an object with non-nested properties or a 1-dimensional array
+    /// - 3... corresponds to nested objects or multi-dimensional arrays or combinations thereof
+    let nestingDepth: Int
+
     /// Creates a new `FormDataDecoder`.
-    public init() { }
+    /// - Parameter nestingDepth: maximum allowed nesting depth of the decoded structure. Defaults to 8.
+    public init(nestingDepth: Int = 8) {
+        self.nestingDepth = nestingDepth
+    }
+
+    /// Decodes a `Decodable` item from `String` using the supplied boundary.
+    ///
+    ///     let foo = try FormDataDecoder().decode(Foo.self, from: "...", boundary: "123")
+    ///
+    /// - Parameters:
+    ///   - decodable: Generic `Decodable` type.
+    ///   - data: String to decode.
+    ///   - boundary: Multipart boundary to use in the decoding.
+    /// - Throws: Any errors decoding the model with `Codable` or parsing the data.
+    /// - Returns: An instance of the decoded type `D`.
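The effect of `nestingDepth` and the bracketed part-name convention this PR introduces is easiest to see end to end. A minimal sketch, assuming a hypothetical `Report` type, field values, and boundary string (none of these are part of this diff):

```swift
import MultipartKit

struct Report: Decodable {
    struct Author: Decodable { let name: String }
    let title: String
    let author: Author
    let tags: [String]
}

// Multipart bodies use CRLF line endings; nested fields use bracketed names.
let body = [
    "--abc123",
    "Content-Disposition: form-data; name=\"title\"", "", "Weekly summary",
    "--abc123",
    "Content-Disposition: form-data; name=\"author[name]\"", "", "Jane",
    "--abc123",
    "Content-Disposition: form-data; name=\"tags[]\"", "", "swift",
    "--abc123",
    "Content-Disposition: form-data; name=\"tags[]\"", "", "vapor",
    "--abc123--", ""
].joined(separator: "\r\n")

// The default nestingDepth of 8 is plenty; this structure only needs 3
// (top-level object -> nested object or array -> leaf value).
let report = try FormDataDecoder().decode(Report.self, from: body, boundary: "abc123")
// report.author.name == "Jane", report.tags == ["swift", "vapor"]
```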
public func decode(_ decodable: D.Type, from data: String, boundary: String) throws -> D where D: Decodable { @@ -17,11 +36,12 @@ public struct FormDataDecoder { /// /// let foo = try FormDataDecoder().decode(Foo.self, from: data, boundary: "123") /// - /// - parameters: - /// - encodable: Generic `Decodable` type. - /// - boundary: Multipart boundary to used in the encoding. - /// - throws: Any errors decoding the model with `Codable` or parsing the data. - /// - returns: An instance of the decoded type `D`. + /// - Parameters: + /// - decodable: Generic `Decodable` type. + /// - data: Data to decode. + /// - boundary: Multipart boundary to used in the decoding. + /// - Throws: Any errors decoding the model with `Codable` or parsing the data. + /// - Returns: An instance of the decoded type `D`. public func decode(_ decodable: D.Type, from data: [UInt8], boundary: String) throws -> D where D: Decodable { @@ -45,199 +65,180 @@ public struct FormDataDecoder { } try parser.execute(data) - let multipart = FormDataDecoderContext(parts: parts) - let decoder = _FormDataDecoder(multipart: multipart, codingPath: []) - return try D(from: decoder) + let data = MultipartFormData(parts: parts, nestingDepth: nestingDepth) + return try data.decode(codingPath: []) } } // MARK: Private -private final class FormDataDecoderContext { - var parts: [MultipartPart] - init(parts: [MultipartPart]) { - self.parts = parts - } - - func decode(_ decodable: D.Type, at codingPath: [CodingKey]) throws -> D where D: Decodable { - guard let convertible = D.self as? MultipartPartConvertible.Type else { - throw MultipartError.convertibleType(D.self) - } - - let part: MultipartPart - switch codingPath.count { - case 1: - let name = codingPath[0].stringValue - guard let p = parts.firstPart(named: name) else { - throw MultipartError.missingPart(name) - } - part = p - case 2: - let name = codingPath[0].stringValue + "[]" - guard let offset = codingPath[1].intValue else { - throw MultipartError.nesting - } - guard let p = parts.allParts(named: name)[safe: offset] else { - throw MultipartError.missingPart("\(codingPath[1].stringValue)") - } - part = p - default: - throw MultipartError.nesting - } - - guard let any = convertible.init(multipart: part) else { - throw MultipartError.convertiblePart(D.self, part) - } - return any as! 
D - } -} - - private struct _FormDataDecoder: Decoder { - var codingPath: [CodingKey] - var userInfo: [CodingUserInfoKey: Any] { - return [:] - } - let multipart: FormDataDecoderContext + let codingPath: [CodingKey] + let data: MultipartFormData - init(multipart: FormDataDecoderContext, codingPath: [CodingKey]) { - self.multipart = multipart - self.codingPath = codingPath - } + var userInfo: [CodingUserInfoKey: Any] { [:] } func container(keyedBy type: Key.Type) throws -> KeyedDecodingContainer where Key : CodingKey { - return KeyedDecodingContainer(_FormDataKeyedDecoder(multipart: multipart, codingPath: codingPath)) + try data.keyedContainer(codingPath: codingPath) } func unkeyedContainer() throws -> UnkeyedDecodingContainer { - return try _FormDataUnkeyedDecoder(multipart: multipart, codingPath: codingPath) + try data.unkeyedContainer(codingPath: codingPath) } func singleValueContainer() throws -> SingleValueDecodingContainer { - return _FormDataSingleValueDecoder(multipart: multipart, codingPath: codingPath) + try data.singleValueContainer(codingPath: codingPath) } } private struct _FormDataSingleValueDecoder: SingleValueDecodingContainer { var codingPath: [CodingKey] - let multipart: FormDataDecoderContext - - init(multipart: FormDataDecoderContext, codingPath: [CodingKey]) { - self.multipart = multipart - self.codingPath = codingPath - } + let part: MultipartPart func decodeNil() -> Bool { - return false + false } func decode(_ type: T.Type) throws -> T where T: Decodable { - return try multipart.decode(T.self, at: codingPath) + try part.decode(type, at: codingPath) } } private struct _FormDataKeyedDecoder: KeyedDecodingContainerProtocol where K: CodingKey { - var codingPath: [CodingKey] + let codingPath: [CodingKey] var allKeys: [K] { - return multipart.parts - .compactMap { $0.name } - .compactMap { K(stringValue: $0) } + data.keys.compactMap(K.init(stringValue:)) } - let multipart: FormDataDecoderContext + let data: MultipartFormData.Keyed - init(multipart: FormDataDecoderContext, codingPath: [CodingKey]) { - self.multipart = multipart - self.codingPath = codingPath + func contains(_ key: K) -> Bool { + data.keys.contains(key.stringValue) } - func contains(_ key: K) -> Bool { - return multipart.parts.contains { $0.name == key.stringValue } + func getValue(forKey key: K) throws -> MultipartFormData { + guard let value = data[key.stringValue] else { + throw DecodingError.keyNotFound(key, .init(codingPath: codingPath, debugDescription: "")) + } + return value } func decodeNil(forKey key: K) throws -> Bool { - return false + false } func decode(_ type: T.Type, forKey key: K) throws -> T where T : Decodable { - if T.self is MultipartPartConvertible.Type { - return try multipart.decode(T.self, at: codingPath + [key]) - } else { - let decoder = _FormDataDecoder(multipart: multipart, codingPath: codingPath + [key]) - return try T(from: decoder) - } + try getValue(forKey: key).decode(codingPath: codingPath + [key]) } func nestedContainer(keyedBy type: NestedKey.Type, forKey key: K) throws -> KeyedDecodingContainer where NestedKey : CodingKey { - return KeyedDecodingContainer(_FormDataKeyedDecoder(multipart: multipart, codingPath: codingPath + [key])) + try getValue(forKey: key).keyedContainer(codingPath: codingPath + [key]) } func nestedUnkeyedContainer(forKey key: K) throws -> UnkeyedDecodingContainer { - return try _FormDataUnkeyedDecoder(multipart: multipart, codingPath: codingPath + [key]) + try getValue(forKey: key).unkeyedContainer(codingPath: codingPath + [key]) } func superDecoder() 
throws -> Decoder { - return _FormDataDecoder(multipart: multipart, codingPath: codingPath) + fatalError() } func superDecoder(forKey key: K) throws -> Decoder { - return _FormDataDecoder(multipart: multipart, codingPath: codingPath + [key]) + fatalError() } } private struct _FormDataUnkeyedDecoder: UnkeyedDecodingContainer { + var index: CodingKey { BasicCodingKey.index(currentIndex) } + var isAtEnd: Bool { currentIndex >= data.count } var codingPath: [CodingKey] - var count: Int? - var isAtEnd: Bool { - return currentIndex >= count! + var count: Int? { data.count } + var currentIndex: Int = 0 + var data: [MultipartFormData] + + mutating func decodeNil() throws -> Bool { + false } - var currentIndex: Int - var index: CodingKey { - return BasicCodingKey.index(self.currentIndex) + + mutating func decode(_ type: T.Type) throws -> T where T: Decodable { + defer { currentIndex += 1 } + return try data[currentIndex].decode(codingPath: codingPath + [index]) } - let multipart: FormDataDecoderContext + func nestedContainer(keyedBy type: NestedKey.Type) throws -> KeyedDecodingContainer where NestedKey : CodingKey { + try data[currentIndex].keyedContainer(codingPath: codingPath + [index]) + } - init(multipart: FormDataDecoderContext, codingPath: [CodingKey]) throws { - self.multipart = multipart - self.codingPath = codingPath + func nestedUnkeyedContainer() throws -> UnkeyedDecodingContainer { + try data[currentIndex].unkeyedContainer(codingPath: codingPath) + } - let name: String - switch codingPath.count { - case 1: name = codingPath[0].stringValue - default: - throw MultipartError.nesting - } - let parts = multipart.parts.allParts(named: name + "[]") - self.count = parts.count - self.currentIndex = 0 + func superDecoder() throws -> Decoder { + fatalError() } +} - mutating func decodeNil() throws -> Bool { - return false +private extension MultipartFormData { + func keyedContainer(codingPath: [CodingKey]) throws -> KeyedDecodingContainer { + guard let dictionary = self.dictionary else { + throw DecodingError.typeMismatch(dataType, .init(codingPath: codingPath, debugDescription: "expected dictionary but encountered \(dataTypeDescription)")) + } + return KeyedDecodingContainer(_FormDataKeyedDecoder(codingPath: codingPath, data: dictionary)) } - mutating func decode(_ type: T.Type) throws -> T where T: Decodable { - defer { currentIndex += 1 } - if T.self is MultipartPartConvertible.Type { - return try multipart.decode(T.self, at: codingPath + [index]) - } else { - let decoder = _FormDataDecoder(multipart: multipart, codingPath: codingPath + [index]) - return try T(from: decoder) + func unkeyedContainer(codingPath: [CodingKey]) throws -> UnkeyedDecodingContainer { + guard let array = self.array else { + throw DecodingError.typeMismatch(dataType, .init(codingPath: codingPath, debugDescription: "expected array but encountered \(dataTypeDescription)")) } + return _FormDataUnkeyedDecoder(codingPath: codingPath, data: array) } - mutating func nestedContainer(keyedBy type: NestedKey.Type) throws -> KeyedDecodingContainer where NestedKey : CodingKey { - return KeyedDecodingContainer(_FormDataKeyedDecoder(multipart: multipart, codingPath: codingPath + [index])) + func singleValueContainer(codingPath: [CodingKey]) throws -> SingleValueDecodingContainer { + guard let part = self.part else { + throw DecodingError.typeMismatch(dataType, .init(codingPath: codingPath, debugDescription: "expected single value but encountered \(dataTypeDescription)")) + } + return _FormDataSingleValueDecoder(codingPath: codingPath, 
part: part) } - mutating func nestedUnkeyedContainer() throws -> UnkeyedDecodingContainer { - return try _FormDataUnkeyedDecoder(multipart: multipart, codingPath: codingPath + [index]) + var dataTypeDescription: String { + switch self { + case .array: return "array" + case .keyed: return "dictionary" + case .single: return "single value" + } } - mutating func superDecoder() throws -> Decoder { - return _FormDataDecoder(multipart: multipart, codingPath: codingPath + [index]) + var dataType: Any.Type { + switch self { + case .array: return [MultipartFormData].self + case .keyed: return Keyed.self + case .single: return MultipartPart.self + } } + func decode(codingPath: [CodingKey]) throws -> T where T: Decodable { + guard let part = part else { + return try T(from: _FormDataDecoder(codingPath: codingPath, data: self)) + } + + return try part.decode(T.self, at: codingPath) + } +} +private extension MultipartPart { + func decode(_ type: T.Type, at codingPath: [CodingKey]) throws -> T where T: Decodable { + guard + let Convertible = T.self as? MultipartPartConvertible.Type, + let decoded = Convertible.init(multipart: self) as? T + else { + let path = codingPath.map(\.stringValue).joined(separator: ".") + throw DecodingError.dataCorrupted( + .init( + codingPath: codingPath, + debugDescription: #"Could not convert value at "\#(path)" to type \#(T.self) from multipart part."# + ) + ) + } + return decoded + } } diff --git a/Sources/MultipartKit/FormDataEncoder.swift b/Sources/MultipartKit/FormDataEncoder.swift index d622ed7..ba63bfb 100644 --- a/Sources/MultipartKit/FormDataEncoder.swift +++ b/Sources/MultipartKit/FormDataEncoder.swift @@ -12,9 +12,9 @@ public struct FormDataEncoder { public func encode(_ encodable: E, boundary: String) throws -> String where E: Encodable { - var buffer = ByteBufferAllocator().buffer(capacity: 0) - try self.encode(encodable, boundary: boundary, into: &buffer) - return String(decoding: buffer.readableBytesView, as: UTF8.self) + let encoder = _Encoder(codingPath: []) + try encodable.encode(to: encoder) + return try MultipartSerializer().serialize(parts: encoder.getData().namedParts(), boundary: boundary) } /// Encodes an `Encodable` item to `Data` using the supplied boundary. @@ -30,158 +30,230 @@ public struct FormDataEncoder { public func encode(_ encodable: E, boundary: String, into buffer: inout ByteBuffer) throws where E: Encodable { - let multipart = FormDataEncoderContext() - let encoder = _FormDataEncoder(multipart: multipart, codingPath: []) + let encoder = _Encoder(codingPath: []) try encodable.encode(to: encoder) - try MultipartSerializer().serialize(parts: multipart.parts, boundary: boundary, into: &buffer) + try MultipartSerializer().serialize(parts: encoder.getData().namedParts(), boundary: boundary, into: &buffer) } } -// MARK: Private +// MARK: - Private -private final class FormDataEncoderContext { - var parts: [MultipartPart] - init() { - self.parts = [] - } +// MARK: _Container - func encode(_ encodable: E, at codingPath: [CodingKey]) throws where E: Encodable { - guard let convertible = encodable as? 
MultipartPartConvertible else { - throw MultipartError.convertibleType(E.self) - } - - guard var part = convertible.multipart else { - throw MultipartError.convertibleType(E.self) - } - - switch codingPath.count { - case 1: part.name = codingPath[0].stringValue - case 2: - guard codingPath[1].intValue != nil else { - throw MultipartError.nesting - } - part.name = codingPath[0].stringValue + "[]" - default: - throw MultipartError.nesting - } - self.parts.append(part) - } +private protocol _Container { + func getData() -> MultipartFormData } -private struct _FormDataEncoder: Encoder { - let codingPath: [CodingKey] - let multipart: FormDataEncoderContext - var userInfo: [CodingUserInfoKey: Any] { - return [:] - } +// MARK: _Encoder - init(multipart: FormDataEncoderContext, codingPath: [CodingKey]) { - self.multipart = multipart +private final class _Encoder { + init(codingPath: [CodingKey]) { self.codingPath = codingPath } + private var container: _Container? = nil + var codingPath: [CodingKey] +} - func container(keyedBy type: Key.Type) -> KeyedEncodingContainer where Key : CodingKey { - return KeyedEncodingContainer(_FormDataKeyedEncoder(multipart: multipart, codingPath: codingPath)) +extension _Encoder: Encoder { + var userInfo: [CodingUserInfoKey: Any] { [:] } + + func container(keyedBy type: Key.Type) -> KeyedEncodingContainer + where Key: CodingKey + { + let container = KeyedContainer(codingPath: codingPath) + self.container = container + return .init(container) } func unkeyedContainer() -> UnkeyedEncodingContainer { - return _FormDataUnkeyedEncoder(multipart: multipart, codingPath: codingPath) + let container = UnkeyedContainer(codingPath: codingPath) + self.container = container + return container } func singleValueContainer() -> SingleValueEncodingContainer { - return _FormDataSingleValueEncoder(multipart: multipart, codingPath: codingPath) + let container = SingleValueContainer(codingPath: codingPath) + self.container = container + return container } } -private struct _FormDataSingleValueEncoder: SingleValueEncodingContainer { - let multipart: FormDataEncoderContext - var codingPath: [CodingKey] +extension _Encoder: _Container { + func getData() -> MultipartFormData { + container?.getData() ?? .array([]) + } +} - init(multipart: FormDataEncoderContext, codingPath: [CodingKey]) { - self.multipart = multipart - self.codingPath = codingPath +// MARK: _Encoder.KeyedContainer + +extension _Encoder { + final class KeyedContainer where Key: CodingKey { + var codingPath: [CodingKey] + var data: OrderedDictionary = [:] + + init(codingPath: [CodingKey]) { + self.codingPath = codingPath + } } +} - mutating func encodeNil() throws { - // do nothing +extension _Encoder.KeyedContainer: KeyedEncodingContainerProtocol { + func encodeNil(forKey _: Key) throws { + // skip } - mutating func encode(_ value: T) throws where T : Encodable { - try multipart.encode(value, at: codingPath) + func encode(_ value: T, forKey key: Key) throws + where T : Encodable + { + if let convertible = value as? 
MultipartPartConvertible { + if let part = convertible.multipart { + data[key.stringValue] = .data(.single(part)) + } + } else { + let encoder = _Encoder(codingPath: codingPath + [key]) + try value.encode(to: encoder) + data[key.stringValue] = .data(encoder.getData()) + } } -} -private struct _FormDataKeyedEncoder: KeyedEncodingContainerProtocol where K: CodingKey { - let multipart: FormDataEncoderContext - var codingPath: [CodingKey] + func nestedContainer(keyedBy keyType: NestedKey.Type, forKey key: Key) -> KeyedEncodingContainer + where NestedKey: CodingKey + { + let container = _Encoder.KeyedContainer(codingPath: codingPath + [key]) + data[key.stringValue] = .container(container) + return .init(container) + } - init(multipart: FormDataEncoderContext, codingPath: [CodingKey]) { - self.multipart = multipart - self.codingPath = codingPath + /// See `KeyedEncodingContainerProtocol` + func nestedUnkeyedContainer(forKey key: Key) -> UnkeyedEncodingContainer { + let container = _Encoder.UnkeyedContainer(codingPath: codingPath + [key]) + data[key.stringValue] = .container(container) + return container } - mutating func encodeNil(forKey key: K) throws { - // ignore + func superEncoder() -> Encoder { + fatalError() } - mutating func encode(_ value: T, forKey key: K) throws where T : Encodable { - if value is MultipartPartConvertible { - try multipart.encode(value, at: codingPath + [key]) - } else { - let encoder = _FormDataEncoder(multipart: multipart, codingPath: codingPath + [key]) - try value.encode(to: encoder) + func superEncoder(forKey key: Key) -> Encoder { + fatalError() + } +} + +extension _Encoder.KeyedContainer: _Container { + func getData() -> MultipartFormData { + .keyed(data.mapValues(\.data)) + } +} + +private enum DataOrContainer { + case data(MultipartFormData) + case container(_Container) + + var data: MultipartFormData { + switch self { + case .container(let container): + return container.getData() + case .data(let data): + return data } } +} + +// MARK: _Encoder.UnkeyedContainer + +extension _Encoder { + final class UnkeyedContainer { + var codingPath: [CodingKey] + var data: [DataOrContainer] = [] - mutating func nestedContainer(keyedBy keyType: NestedKey.Type, forKey key: K) -> KeyedEncodingContainer where NestedKey: CodingKey { - return KeyedEncodingContainer(_FormDataKeyedEncoder(multipart: multipart, codingPath: codingPath + [key])) + init(codingPath: [CodingKey]) { + self.codingPath = codingPath + } } +} - mutating func nestedUnkeyedContainer(forKey key: K) -> UnkeyedEncodingContainer { - return _FormDataUnkeyedEncoder(multipart: multipart, codingPath: codingPath + [key]) +extension _Encoder.UnkeyedContainer: UnkeyedEncodingContainer { + var count: Int { data.count } + + func encodeNil() throws { + // skip } - mutating func superEncoder() -> Encoder { - return _FormDataEncoder(multipart: multipart, codingPath: codingPath) + func encode(_ value: T) throws + where T : Encodable + { + if let convertible = value as? 
MultipartPartConvertible { + if let part = convertible.multipart { + data.append(.data(.single(part))) + } + } else { + let encoder = _Encoder(codingPath: codingPath) + try value.encode(to: encoder) + data.append(.data(encoder.getData())) + } } - mutating func superEncoder(forKey key: K) -> Encoder { - return _FormDataEncoder(multipart: multipart, codingPath: codingPath + [key]) + func nestedContainer(keyedBy keyType: NestedKey.Type) -> KeyedEncodingContainer + where NestedKey: CodingKey + { + let container = _Encoder.KeyedContainer(codingPath: codingPath) + data.append(.container(container)) + return .init(container) } -} -private struct _FormDataUnkeyedEncoder: UnkeyedEncodingContainer { - var count: Int - let multipart: FormDataEncoderContext - var codingPath: [CodingKey] - var index: CodingKey { - return BasicCodingKey.index(0) + func nestedUnkeyedContainer() -> UnkeyedEncodingContainer { + let container = _Encoder.UnkeyedContainer(codingPath: codingPath) + data.append(.container(container)) + return container } - init(multipart: FormDataEncoderContext, codingPath: [CodingKey]) { - self.multipart = multipart - self.codingPath = codingPath - self.count = 0 + func superEncoder() -> Encoder { + fatalError() } +} - mutating func encodeNil() throws { - // ignore +extension _Encoder.UnkeyedContainer: _Container { + func getData() -> MultipartFormData { + .array(data.map(\.data)) } +} + +// MARK: _Encoder.SingleValueContainer + +extension _Encoder { + final class SingleValueContainer { + var codingPath: [CodingKey] + var data: MultipartFormData? - mutating func encode(_ value: T) throws where T : Encodable { - let encoder = _FormDataEncoder(multipart: multipart, codingPath: codingPath + [index]) - try value.encode(to: encoder) + init(codingPath: [CodingKey]) { + self.codingPath = codingPath + } } +} - mutating func nestedContainer(keyedBy keyType: NestedKey.Type) -> KeyedEncodingContainer where NestedKey : CodingKey { - return KeyedEncodingContainer(_FormDataKeyedEncoder(multipart: multipart, codingPath: codingPath + [index])) +extension _Encoder.SingleValueContainer: SingleValueEncodingContainer { + func encodeNil() throws { + // skip } - mutating func nestedUnkeyedContainer() -> UnkeyedEncodingContainer { - return _FormDataUnkeyedEncoder(multipart: multipart, codingPath: codingPath + [index]) + func encode(_ value: T) throws + where T : Encodable + { + if let convertible = value as? MultipartPartConvertible { + if let part = convertible.multipart { + data = .single(part) + } + } else { + let encoder = _Encoder(codingPath: codingPath) + try value.encode(to: encoder) + data = encoder.getData() + } } +} - mutating func superEncoder() -> Encoder { - return _FormDataEncoder(multipart: multipart, codingPath: codingPath + [index]) +extension _Encoder.SingleValueContainer: _Container { + func getData() -> MultipartFormData { + data ?? .empty } } diff --git a/Sources/MultipartKit/MultipartError.swift b/Sources/MultipartKit/MultipartError.swift deleted file mode 100644 index 364a7e1..0000000 --- a/Sources/MultipartKit/MultipartError.swift +++ /dev/null @@ -1,26 +0,0 @@ -/// Errors that can be thrown while working with Multipart. 
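On the encoder side rewritten above, a nested `Encodable` now serializes to one part per leaf value, named with the same bracket convention the decoder consumes. A hedged sketch with a made-up `Profile` type and boundary:

```swift
import MultipartKit

struct Profile: Encodable {
    struct Address: Encodable { let city: String }
    let name: String
    let address: Address
    let hobbies: [String]
}

let profile = Profile(name: "Jane", address: .init(city: "Berlin"), hobbies: ["swift", "vapor"])

// Produces parts named "name", "address[city]", "hobbies[]", "hobbies[]",
// in declaration order (hence the vendored OrderedDictionary in the keyed container).
let body: String = try FormDataEncoder().encode(profile, boundary: "abc123")
```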
-public enum MultipartError: Error, CustomStringConvertible { - case invalidFormat - case convertibleType(Any.Type) - case convertiblePart(Any.Type, MultipartPart) - case nesting - case missingPart(String) - case missingFilename - - public var description: String { - switch self { - case .invalidFormat: - return "Multipart data is not formatted correctly" - case .convertibleType(let type): - return "\(type) is not convertible to multipart data" - case .convertiblePart(let type, let part): - return "Multipart part is not convertible to \(type): \(part)" - case .nesting: - return "Nested multipart data is not supported" - case .missingPart(let name): - return "No multipart part named '\(name)' was found" - case .missingFilename: - return "Multipart part did not have a filename" - } - } -} diff --git a/Sources/MultipartKit/MultipartFormData.swift b/Sources/MultipartKit/MultipartFormData.swift new file mode 100644 index 0000000..205ece2 --- /dev/null +++ b/Sources/MultipartKit/MultipartFormData.swift @@ -0,0 +1,76 @@ +enum MultipartFormData: Equatable { + typealias Keyed = OrderedDictionary + + case single(MultipartPart) + case array([MultipartFormData]) + case keyed(Keyed) + + init(parts: [MultipartPart], nestingDepth: Int) { + self = parts.reduce(into: .empty) { result, part in + result.insertingPart(part, at: part.name.map(path) ?? [], remainingNestingLevels: nestingDepth) + } + } + + static let empty = MultipartFormData.keyed([:]) + + var array: [MultipartFormData]? { + guard case let .array(array) = self else { return nil } + return array + } + + var dictionary: Keyed? { + guard case let .keyed(dict) = self else { return nil } + return dict + } + + var part: MultipartPart? { + guard case let .single(part) = self else { return nil } + return part + } +} + +private func path(from string: String) -> ArraySlice { + ArraySlice(string.replacingOccurrences(of: "]", with: "").split(omittingEmptySubsequences: false, whereSeparator: { $0 == "[" })) +} + +extension MultipartFormData { + func namedParts() -> [MultipartPart] { + Self.namedParts(from: self) + } + + private static func namedParts(from data: MultipartFormData, path: String? = nil) -> [MultipartPart] { + switch data { + case .array(let array): + return array.flatMap { namedParts(from: $0, path: path.map { "\($0)[]" }) } + case .single(var part): + part.name = path + return [part] + case .keyed(let dictionary): + return dictionary.flatMap { key, value in + namedParts(from: value, path: path.map { "\($0)[\(key)]" } ?? key) + } + } + } +} + +private extension MultipartFormData { + mutating func insertingPart(_ part: MultipartPart, at path: ArraySlice, remainingNestingLevels: Int) { + self = insertPart(part, at: path, remainingNestingLevels: remainingNestingLevels) + } + + func insertPart(_ part: MultipartPart, at path: ArraySlice, remainingNestingLevels: Int) -> MultipartFormData { + guard remainingNestingLevels > 0 else { + return self + } + switch path.first { + case .none: + return .single(part) + case "": + return .array((array ?? []) + [MultipartFormData.empty.insertPart(part, at: path.dropFirst(), remainingNestingLevels: remainingNestingLevels - 1)]) + case let .some(head): + var dictionary = self.dictionary ?? 
[:] + dictionary[String(head), default: .empty].insertingPart(part, at: path.dropFirst(), remainingNestingLevels: remainingNestingLevels - 1) + return .keyed(dictionary) + } + } +} diff --git a/Sources/MultipartKit/MultipartParser.swift b/Sources/MultipartKit/MultipartParser.swift index c490707..962e2b0 100644 --- a/Sources/MultipartKit/MultipartParser.swift +++ b/Sources/MultipartKit/MultipartParser.swift @@ -9,7 +9,7 @@ /// /// See [Wikipedia](https://en.wikipedia.org/wiki/MIME#Multipart_messages) for more information. /// -/// Seealso `form-urlencoded` encoding where delimiter boundaries are not required. +/// See also `form-urlencoded` encoding where delimiter boundaries are not required. public final class MultipartParser { private enum Error: Swift.Error { case syntax diff --git a/Sources/MultipartKit/MultipartPart.swift b/Sources/MultipartKit/MultipartPart.swift index f4f271f..406b0f3 100644 --- a/Sources/MultipartKit/MultipartPart.swift +++ b/Sources/MultipartKit/MultipartPart.swift @@ -61,6 +61,6 @@ extension Array where Element == MultipartPart { /// Returns all `MultipartPart`s with matching name attribute in `"Content-Disposition"` header. public func allParts(named name: String) -> [MultipartPart] { - return filter { $0.name == name } + filter { $0.name == name } } } diff --git a/Sources/MultipartKit/MultipartPartConvertible.swift b/Sources/MultipartKit/MultipartPartConvertible.swift index f039240..497cf2e 100644 --- a/Sources/MultipartKit/MultipartPartConvertible.swift +++ b/Sources/MultipartKit/MultipartPartConvertible.swift @@ -49,7 +49,6 @@ extension UInt16: MultipartPartConvertible { } extension UInt32: MultipartPartConvertible { } extension UInt64: MultipartPartConvertible { } - extension Float: MultipartPartConvertible { public var multipart: MultipartPart? { return MultipartPart(body: self.description) diff --git a/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+Bucket.swift b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+Bucket.swift new file mode 100644 index 0000000..2872006 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+Bucket.swift @@ -0,0 +1,36 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension _HashTable { + /// Identifies a particular bucket within a hash table by its offset. + /// Having a dedicated wrapper type for this prevents passing a bucket number + /// to a function that expects a word index, or vice versa. + @usableFromInline + @frozen + internal struct Bucket { + /// The distance of this bucket from the first bucket in the hash table. 
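Returning to the `MultipartFormData` tree introduced above: `path(from:)` splits a bracketed part name into segments, and `insertPart` folds the parts into nested values, spending one unit of `nestingDepth` per level and leaving out parts that exceed the budget. A module-internal sketch, as one might write in a test (part names and bodies are hypothetical):

```swift
@testable import MultipartKit

// "book[title]" -> ["book", "title"]; "book[authors][]" -> ["book", "authors", ""],
// where the empty trailing segment marks an array element.
var title = MultipartPart(body: "Advanced Swift")
title.name = "book[title]"
var author1 = MultipartPart(body: "Chris")
author1.name = "book[authors][]"
var author2 = MultipartPart(body: "Ole")
author2.name = "book[authors][]"

let formData = MultipartFormData(parts: [title, author1, author2], nestingDepth: 8)
// formData is the tree:
//   .keyed(["book": .keyed([
//       "title":   .single(title),
//       "authors": .array([.single(author1), .single(author2)])
//   ])])
// "book[title]" needs a depth of 3 and "book[authors][]" a depth of 4; parts
// deeper than nestingDepth are not inserted into the tree.
```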
+ @usableFromInline + internal var offset: Int + + @inlinable + @inline(__always) + internal init(offset: Int) { + assert(offset >= 0) + self.offset = offset + } + } +} diff --git a/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+BucketIterator.swift b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+BucketIterator.swift new file mode 100644 index 0000000..c19a091 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+BucketIterator.swift @@ -0,0 +1,271 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension _HashTable { + /// An iterator construct for visiting a chain of buckets within the hash + /// table. This is a convenient tool for implementing linear probing. + /// + /// Beyond merely providing bucket values, bucket iterators can also tell + /// you their current oposition within the hash table, and (for mutable hash + /// tables) they allow you update the value of the currently visited bucket. + /// (This is useful when implementing simple insertions, for example.) + /// + /// The bucket iterator caches some bucket contents, so if you are looping + /// over an iterator you must be careful to only modify hash table contents + /// through the iterator itself. + /// + /// - Warning: Like `UnsafeHandle`, `BucketIterator` does not have + /// ownership of its underlying storage buffer. You must not escape + /// iterator values outside the closure call that produced the original + /// hash table. + @usableFromInline + internal struct BucketIterator { + @usableFromInline + internal typealias Bucket = _HashTable.Bucket + + /// The hash table we are iterating over. + internal let _hashTable: _UnsafeHashTable + + /// The current position within the hash table. + @usableFromInline + internal var _currentBucket: Bucket + + /// The raw bucket value corresponding to `_currentBucket`. + internal var _currentRawValue: UInt64 + + /// Remaining bits not yet processed from the last word read. + internal var _nextBits: UInt64 + + /// Count of usable bits in `_nextBits`. (They start at bit 0.) + internal var _remainingBitCount: Int + + internal var _wrappedAround = false + + /// Create a new iterator starting at the specified bucket. 
+ @_effects(releasenone) + @usableFromInline + internal init(hashTable: _UnsafeHashTable, startingAt bucket: Bucket) { + assert(hashTable.scale >= _HashTable.minimumScale) + assert(bucket.offset >= 0 && bucket.offset < hashTable.bucketCount) + self._hashTable = hashTable + self._currentBucket = bucket + (self._currentRawValue, self._nextBits, self._remainingBitCount) + = hashTable._startIterator(bucket: bucket) + } + } +} + +extension _HashTable.UnsafeHandle { + @usableFromInline + internal typealias BucketIterator = _HashTable.BucketIterator + + @_effects(releasenone) + @inlinable + @inline(__always) + internal func idealBucket(forHashValue hashValue: Int) -> Bucket { + return Bucket(offset: hashValue & (bucketCount - 1)) + } + + @inlinable + @inline(__always) + internal func idealBucket(for element: Element) -> Bucket { + let hashValue = element._rawHashValue(seed: seed) + return idealBucket(forHashValue: hashValue) + } + + /// Return a bucket iterator for the chain starting at the bucket corresponding + /// to the specified value. + @inlinable + @inline(__always) + internal func bucketIterator(for element: Element) -> BucketIterator { + let bucket = idealBucket(for: element) + return bucketIterator(startingAt: bucket) + } + + /// Return a bucket iterator for the chain starting at the specified bucket. + @inlinable + @inline(__always) + internal func bucketIterator(startingAt bucket: Bucket) -> BucketIterator { + BucketIterator(hashTable: self, startingAt: bucket) + } + + @usableFromInline + @_effects(releasenone) + internal func startFind( + _ startBucket: Bucket + ) -> (iterator: BucketIterator, currentValue: Int?) { + let iterator = bucketIterator(startingAt: startBucket) + return (iterator, iterator.currentValue) + } + + @_effects(readonly) + @usableFromInline + internal func _startIterator( + bucket: Bucket + ) -> (currentBits: UInt64, nextBits: UInt64, remainingBitCount: Int) { + // The `scale == 5` case is special because the last word is only half filled there, + // which is why the code below needs to special case it. + // (For all scales > 5, the last bucket ends exactly on a word boundary.) + + var (word, bit) = self.position(of: bucket) + if bit + scale <= 64 { + // We're in luck, the current bucket is stored entirely within one word. + let w = self[word: word] + let currentRawValue = (w &>> bit) & bucketMask + let c = (scale == 5 && word == wordCount - 1 ? 32 : 64) + let remainingBitCount = c - (bit + scale) + let nextBits = (remainingBitCount == 0 ? 0 : w &>> (bit + scale)) + assert(remainingBitCount >= 0) + assert(bit < c) + return (currentRawValue, nextBits, remainingBitCount) + } else { + // We need to read two words. + assert(scale != 5 || word < wordCount - 1) + assert(bit > 0) + let w1 = self[word: word] + word = self.word(after: word) + let w2 = self[word: word] + let currentRawValue = ((w1 &>> bit) | (w2 &<< (64 - bit))) & bucketMask + let overhang = scale - (64 - bit) + let nextBits = w2 &>> overhang + let c = (scale == 5 && word == wordCount - 1 ? 32 : 64) + let remainingBitCount = c - overhang + return (currentRawValue, nextBits, remainingBitCount) + } + } +} + +extension _HashTable.BucketIterator { + /// The scale of the hash table. A table of scale *n* holds 2^*n* buckets, + /// each of which contain an *n*-bit value. + @inline(__always) + internal var _scale: Int { _hashTable.scale } + + /// The current position within the hash table. 
+ @inlinable + @inline(__always) + internal var currentBucket: Bucket { _currentBucket } + + @usableFromInline + internal var isOccupied: Bool { + @_effects(readonly) + @inline(__always) + get { + _currentRawValue != 0 + } + } + + /// The value of the bucket at the current position in the hash table. + /// Setting this property overwrites the bucket value. + /// + /// A nil value indicates an empty bucket. + @usableFromInline + internal var currentValue: Int? { + @inline(__always) + @_effects(readonly) + get { _hashTable._value(forBucketContents: _currentRawValue) } + @_effects(releasenone) + set { + _hashTable.assertMutable() + let v = _hashTable._bucketContents(for: newValue) + let pattern = v ^ _currentRawValue + + assert(_currentBucket.offset < _hashTable.bucketCount) + let (word, bit) = _hashTable.position(of: _currentBucket) + _hashTable[word: word] ^= pattern &<< bit + let extractedBits = 64 - bit + if extractedBits < _scale { + let word2 = _hashTable.word(after: word) + _hashTable[word: word2] ^= pattern &>> extractedBits + } + _currentRawValue = v + } + } + + /// Advance this iterator to the next bucket within the hash table. + /// The buckets form a cycle, so the last bucket is logically followed + /// by the first. Therefore, the iterator never runs out of buckets -- + /// you must devise some way to guarantee to stop iterating. + /// + /// In the typical case, you stop iterating buckets when you find the + /// element you're looking for, or when you run across an empty bucket + /// (terminating the chain with a negative lookup result). + /// + /// To catch mistakes (and corrupt tables), `advance` traps the second + /// time it needs to wrap around to the beginning of the table. + @usableFromInline + @_effects(releasenone) + internal mutating func advance() { + // Advance to next bucket, checking for wraparound condition. + _currentBucket.offset &+= 1 + if _currentBucket.offset == _hashTable.bucketCount { + guard !_wrappedAround else { + // Prevent wasting battery in an infinite loop if a hash table + // somehow becomes corrupt. + fatalError("Hash table has no unoccupied buckets") + } + _wrappedAround = true + _currentBucket.offset = 0 + } + + // If we have loaded enough bits, eat them and return. + if _remainingBitCount >= _scale { + _currentRawValue = _nextBits & _hashTable.bucketMask + _nextBits &>>= _scale + _remainingBitCount -= _scale + return + } + + // Load the next batch of bits. + var word = _hashTable.position(of: _currentBucket).word + if _remainingBitCount != 0 { + word = _hashTable.word(after: word) + } + let c = (_hashTable.scale == 5 && word == _hashTable.wordCount - 1 ? 32 : 64) + let w = _hashTable[word: word] + _currentRawValue = (_nextBits | (w &<< _remainingBitCount)) & _hashTable.bucketMask + _nextBits = w &>> (_scale - _remainingBitCount) + _remainingBitCount = c - (_scale - _remainingBitCount) + } + + @usableFromInline + @_effects(releasenone) + internal mutating func findNext() -> Int? { + advance() + return currentValue + } + + /// Advance this iterator until it points to an occupied bucket with the + /// specified value, or an unoccupied bucket -- whichever comes first. + @inlinable + @_effects(releasenone) + internal mutating func advance(until expected: Int) { + while isOccupied && currentValue != expected { + advance() + } + } + + /// Advance this iterator until it points to an unoccupied bucket. + /// Useful when inserting an element that we know isn't already in the table. 
+ @inlinable + @_effects(releasenone) + internal mutating func advanceToNextUnoccupiedBucket() { + while isOccupied { + advance() + } + } +} diff --git a/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+Constants.swift b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+Constants.swift new file mode 100644 index 0000000..7e29f4e --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+Constants.swift @@ -0,0 +1,103 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension _HashTable { + /// The minimum hash table scale. + @usableFromInline + @inline(__always) + internal static var minimumScale: Int { + @_effects(readnone) + get { + 5 + } + } + + /// The maximum hash table scale. + @usableFromInline + @inline(__always) + internal static var maximumScale: Int { + @_effects(readnone) + get { + Swift.min(Int.bitWidth, 56) + } + } + + /// The maximum number of items for which we do not create a hash table. + @usableFromInline + @inline(__always) + internal static var maximumUnhashedCount: Int { + @_effects(readnone) + get { + (1 &<< (minimumScale - 1)) - 1 + } + } + + /// The maximum hash table load factor. + @inline(__always) + internal static var maximumLoadFactor: Double { 3 / 4 } + + /// The minimum hash table load factor. + @inline(__always) + internal static var minimumLoadFactor: Double { 1 / 4 } + + /// The maximum number of items that can be held in a hash table of the given scale. + @usableFromInline + @_effects(readnone) + internal static func minimumCapacity(forScale scale: Int) -> Int { + guard scale >= minimumScale else { return 0 } + let bucketCount = 1 &<< scale + return Int(Double(bucketCount) * minimumLoadFactor) + } + + /// The maximum number of items that can be held in a hash table of the given scale. + @usableFromInline + @_effects(readnone) + internal static func maximumCapacity(forScale scale: Int) -> Int { + guard scale >= minimumScale else { return maximumUnhashedCount } + let bucketCount = 1 &<< scale + return Int(Double(bucketCount) * maximumLoadFactor) + } + + /// The minimum hash table scale that can hold the specified number of elements. + @usableFromInline + @_effects(readnone) + internal static func scale(forCapacity capacity: Int) -> Int { + guard capacity > maximumUnhashedCount else { return 0 } + let capacity = Swift.max(capacity, 1) + // Calculate the minimum number of entries we need to allocate to satisfy + // the maximum load factor. `capacity + 1` below ensures that we always + // leave at least one hole. + let minimumEntries = Swift.max( + Int((Double(capacity) / maximumLoadFactor).rounded(.up)), + capacity + 1) + // The actual number of entries we need to allocate is the lowest power of + // two greater than or equal to the minimum entry count. Calculate its + // exponent. 
+ let scale = (Swift.max(minimumEntries, 2) - 1)._binaryLogarithm() + 1 + assert(scale >= minimumScale && scale < Int.bitWidth) + // The scale is the exponent corresponding to the bucket count. + assert(self.maximumCapacity(forScale: scale) >= capacity) + return scale + } + + /// The count of 64-bit words that a hash table of the specified scale + /// will need to have in its storage. + internal static func wordCount(forScale scale: Int) -> Int { + ((scale &<< scale) + 63) / 64 + } +} + diff --git a/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+UnsafeHandle.swift b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+UnsafeHandle.swift new file mode 100644 index 0000000..2ff6b87 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable+UnsafeHandle.swift @@ -0,0 +1,527 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +@usableFromInline +internal typealias _UnsafeHashTable = _HashTable.UnsafeHandle + +extension _HashTable { + /// A non-owning handle to hash table storage, implementing higher-level + /// table operations. + /// + /// - Warning: `_UnsafeHashTable` values do not have ownership of their + /// underlying storage buffer. You must not escape these handles outside + /// the closure call that produced them. + @usableFromInline + @frozen + internal struct UnsafeHandle { + @usableFromInline + internal typealias Bucket = _HashTable.Bucket + + /// A pointer to the table header. + @usableFromInline + internal var _header: UnsafeMutablePointer
+ + /// A pointer to bucket storage. + @usableFromInline + internal var _buckets: UnsafeMutablePointer + + #if DEBUG + /// True when this handle does not support table mutations. + /// (This is only checked in debug builds.) + @usableFromInline + internal let _readonly: Bool + #endif + + /// Initialize a new hash table handle for storage at the supplied locations. + @inlinable + @inline(__always) + internal init( + header: UnsafeMutablePointer
, + buckets: UnsafeMutablePointer, + readonly: Bool + ) { + self._header = header + self._buckets = buckets + #if DEBUG + self._readonly = readonly + #endif + } + + /// Check that this handle supports mutating operations. + /// Every member that mutates table data must start by calling this function. + /// This helps preventing COW violations. + /// + /// Note that this is a noop in release builds. + @inlinable + @inline(__always) + func assertMutable() { + #if DEBUG + assert(!_readonly, "Attempt to mutate a hash table through a read-only handle") + #endif + } + } +} + +extension _HashTable.UnsafeHandle { + /// The scale of the hash table. A table of scale *n* holds 2^*n* buckets, + /// each of which contain an *n*-bit value. + @inlinable + @inline(__always) + internal var scale: Int { _header.pointee.scale } +// +// /// The scale corresponding to the last call to `reserveCapacity`. +// /// We store this to make sure we don't shrink the table below its reserved size. +// @inlinable +// @inline(__always) +// internal var reservedScale: Int { _header.pointee.reservedScale } + + /// The hasher seed to use within this hash table. + @inlinable + @inline(__always) + internal var seed: Int { _header.pointee.seed } + + /// A bias value that needs to be added to buckets to convert them into offsets + /// into element storage. (This allows O(1) insertions at the front when the + /// underlying storage supports it.) + @inlinable + @inline(__always) + internal var bias: Int { + get { _header.pointee.bias } + nonmutating set { _header.pointee.bias = newValue } + } + + /// The number of buckets within this hash table. This is always a power of two. + @inlinable + @inline(__always) + internal var bucketCount: Int { 1 &<< scale } + + @inlinable + @inline(__always) + internal var bucketMask: UInt64 { UInt64(truncatingIfNeeded: bucketCount) - 1 } + + /// The number of bits used to store all the buckets in this hash table. + /// Each bucket holds a value that is `scale` bits wide. + @inlinable + @inline(__always) + internal var bitCount: Int { scale &<< scale } + + /// The number of 64-bit words that are available in the storage buffer, + /// rounded up to the nearest whole number if necessary. + @inlinable + @inline(__always) + internal var wordCount: Int { (bitCount + UInt64.bitWidth - 1) / UInt64.bitWidth } + + /// The maximum number of items that can fit into this table. + @inlinable + @inline(__always) + internal var capacity: Int { _HashTable.maximumCapacity(forScale: scale) } + + /// Return the bucket logically following `bucket` in this hash table. + /// The buckets form a cycle, so the last bucket is logically followed by the first. + @inlinable + @inline(__always) + func bucket(after bucket: Bucket) -> Bucket { + var offset = bucket.offset + 1 + if offset == bucketCount { + offset = 0 + } + return Bucket(offset: offset) + } + + /// Return the bucket logically preceding `bucket` in this hash table. + /// The buckets form a cycle, so the first bucket is logically preceded by the last. + @inlinable + @inline(__always) + func bucket(before bucket: Bucket) -> Bucket { + let offset = (bucket.offset == 0 ? bucketCount : bucket.offset) - 1 + return Bucket(offset: offset) + } + + /// Return the index of the word logically following `word` in this hash table. + /// The buckets form a cycle, so the last word is logically followed by the first. + /// + /// Note that the last word may be only partially filled if `scale` is less than 6. 
+ @inlinable + @inline(__always) + func word(after word: Int) -> Int { + var result = word + 1 + if result == wordCount { + result = 0 + } + return result + } + + /// Return the index of the 64-bit storage word that holds the first bit + /// corresponding to `bucket`, along with its bit position within the word. + @inlinable + internal func position(of bucket: Bucket) -> (word: Int, bit: Int) { + let start = bucket.offset &* scale + return (start &>> 6, start & 0x3F) + } +} + +extension _HashTable.UnsafeHandle { + /// Decode and return the logical value corresponding to the specified bucket value. + /// + /// The nil value is represented by an all-zero bit pattern. + /// Other values are stored as the complement of the lowest `scale` bits + /// after taking `bias` into account. + /// The range of representable values is `0 ..< bucketCount - 1`. + /// (Note that the value `bucketCount - 1` is missing from this range, as its + /// encoding is used for `nil`. This isn't an issue, because the maximum load + /// factor guarantees that the hash table will never be completely full.) + @inlinable + func _value(forBucketContents bucketContents: UInt64) -> Int? { + let mask = bucketMask + assert(bucketContents <= mask) + guard bucketContents != 0 else { return nil } + let v = (bucketContents ^ mask) &+ UInt64(truncatingIfNeeded: bias) + return Int(truncatingIfNeeded: v >= mask ? v - mask : v) + } + + /// Encodes the specified logical value into a `scale`-bit bit pattern suitable + /// for storing into a bucket. + /// + /// The nil value is represented by an all-zero bit pattern. + /// Other values are stored as the complement of their lowest `scale` bits. + /// The range of representable values is `0 ..< bucketCount - 1`. + /// (Note that the value `bucketCount - 1` is missing from this range, as it + /// its encoding is used for `nil`. This isn't an issue, because the maximum + /// load factor guarantees that the hash table will never be completely full.) + @inlinable + func _bucketContents(for value: Int?) 
-> UInt64 { + guard var value = value else { return 0 } + let mask = Int(truncatingIfNeeded: bucketMask) + assert(value >= 0 && value < mask) + value &-= bias + if value < 0 { value += mask } + assert(value >= 0 && value < mask) + return UInt64(truncatingIfNeeded: value ^ mask) + } + + @inlinable + subscript(word word: Int) -> UInt64 { + @inline(__always) get { + assert(word >= 0 && word < bucketCount) + return _buckets[word] + } + @inline(__always) nonmutating set { + assert(word >= 0 && word < bucketCount) + assertMutable() + _buckets[word] = newValue + } + } + + @inlinable + subscript(raw bucket: Bucket) -> UInt64 { + get { + assert(bucket.offset < bucketCount) + let (word, bit) = position(of: bucket) + var value = self[word: word] &>> bit + let extractedBits = 64 - bit + if extractedBits < scale { + let word2 = self.word(after: word) + value &= (1 &<< extractedBits) - 1 + value |= self[word: word2] &<< extractedBits + } + return value & bucketMask + } + nonmutating set { + assertMutable() + assert(bucket.offset < bucketCount) + let mask = bucketMask + assert(newValue <= mask) + let (word, bit) = position(of: bucket) + self[word: word] &= ~(mask &<< bit) + self[word: word] |= newValue &<< bit + let extractedBits = 64 - bit + if extractedBits < scale { + let word2 = self.word(after: word) + self[word: word2] &= ~((1 &<< (scale - extractedBits)) - 1) + self[word: word2] |= newValue &>> extractedBits + } + } + } + + @inlinable + @inline(__always) + func isOccupied(_ bucket: Bucket) -> Bool { + self[raw: bucket] != 0 + } + + /// Return or update the current value stored in the specified bucket. + /// A nil value indicates that the bucket is empty. + @inlinable + internal subscript(bucket: Bucket) -> Int? { + get { + let contents = self[raw: bucket] + return _value(forBucketContents: contents) + } + nonmutating set { + assertMutable() + let v = _bucketContents(for: newValue) + self[raw: bucket] = v + } + } +} + +extension _UnsafeHashTable { + @inlinable + internal func _find( + _ item: Base.Element, + in elements: Base + ) -> (index: Int?, bucket: Bucket) + where Base.Element: Hashable { + let start = idealBucket(for: item) + var (iterator, value) = startFind(start) + while let index = value { + if elements[_offset: index] == item { + return (index, iterator.currentBucket) + } + value = iterator.findNext() + } + return (nil, iterator.currentBucket) + } +} + +extension _UnsafeHashTable { + @usableFromInline + internal func firstOccupiedBucketInChain(with bucket: Bucket) -> Bucket { + var bucket = bucket + repeat { + bucket = self.bucket(before: bucket) + } while isOccupied(bucket) + return self.bucket(after: bucket) + } + + @inlinable + internal func delete( + bucket: Bucket, + hashValueGenerator: (Int, Int) -> Int // (offset, seed) -> hashValue + ) { + assertMutable() + var it = bucketIterator(startingAt: bucket) + assert(it.isOccupied) + it.advance() + guard it.isOccupied else { + // Fast path: Don't get the start bucket when there's nothing to do. + self[bucket] = nil + return + } + // If we've put a hole in the middle of a collision chain, some element after + // the hole may belong where the new hole is. + + // Find the first bucket in the collision chain that contains the entry we've just deleted. + let start = firstOccupiedBucketInChain(with: bucket) + var hole = bucket + + while it.isOccupied { + let hash = hashValueGenerator(it.currentValue!, seed) + let candidate = idealBucket(forHashValue: hash) + + // Does this element belong between start and hole? 
We need two + // separate tests depending on whether [start, hole] wraps around the + // end of the storage. + let c0 = candidate.offset >= start.offset + let c1 = candidate.offset <= hole.offset + if start.offset <= hole.offset ? (c0 && c1) : (c0 || c1) { + // Fill the hole. Here we are mutating table contents behind the back of + // the iterator; this is okay since we know we are never going to revisit + // `hole` with it. + self[hole] = it.currentValue + hole = it.currentBucket + } + it.advance() + } + self[hole] = nil + } +} + +extension _UnsafeHashTable { + @inlinable + internal func adjustContents( + preparingForInsertionOfElementAtOffset offset: Int, + in elements: Base + ) where Base.Element: Hashable { + assertMutable() + let index = elements._index(at: offset) + if offset < elements.count / 2 { + self.bias += 1 + if offset <= capacity / 3 { + var i = 1 + for item in elements[..= offset { + it.currentValue = value + 1 + } + it.advance() + } while it.currentBucket.offset != 0 + } + } + } +} + +extension _UnsafeHashTable { + @inlinable + @inline(__always) + internal func adjustContents( + preparingForRemovalOf index: Base.Index, + in elements: Base + ) where Base.Element: Hashable { + let next = elements.index(after: index) + adjustContents(preparingForRemovalOf: index ..< next, in: elements) + } + + @inlinable + internal func adjustContents( + preparingForRemovalOf bounds: Range, + in elements: Base + ) where Base.Element: Hashable { + assertMutable() + let startOffset = elements._offset(of: bounds.lowerBound) + let endOffset = elements._offset(of: bounds.upperBound) + let c = endOffset - startOffset + guard c > 0 else { return } + let remainingCount = elements.count - c + + if startOffset >= remainingCount / 2 { + let tailCount = elements.count - endOffset + if tailCount < capacity / 3 { + var i = endOffset + for item in elements[bounds.upperBound...] { + var it = self.bucketIterator(for: item) + it.advance(until: i) + it.currentValue = i - c + i += 1 + } + } else { + var it = bucketIterator(startingAt: Bucket(offset: 0)) + repeat { + if let value = it.currentValue { + if value >= endOffset { + it.currentValue = value - c + } else { + assert(value < startOffset) + } + } + it.advance() + } while it.currentBucket.offset != 0 + } + } else { + if startOffset < capacity / 3 { + var i = 0 + for item in elements[..= endOffset) + } + } + it.advance() + } while it.currentBucket.offset != 0 + } + self.bias -= c + } + } +} + +extension _UnsafeHashTable { + /// Fill an empty hash table by populating it with data from `elements`. + /// + /// - Parameter elements: A random-access collection for which this table is being generated. + @inlinable + internal func fill( + uncheckedUniqueElements elements: C + ) where C.Element: Hashable { + assertMutable() + assert(elements.count <= capacity) + // Iterate over elements and insert their offset into the hash table. + var offset = 0 + for index in elements.indices { + // Find the insertion position. We know that we're inserting a new item, + // so there is no need to compare it with any of the existing ones. + var it = bucketIterator(for: elements[index]) + it.advanceToNextUnoccupiedBucket() + it.currentValue = offset + offset += 1 + } + } + + /// Fill an empty hash table by populating it with data from `elements`. + /// + /// - Parameter elements: A random-access collection for which this table is being generated. + /// - Parameter stoppingOnFirstDuplicateValue: If true, check for duplicate values and stop inserting items when one is found. 
+ /// - Returns: `(success, index)` where `success` is a boolean value indicating that every value in `elements` was successfully inserted. A false success indicates that duplicate elements have been found; in this case `index` points to the first duplicate value; otherwise `index` is set to `elements.endIndex`. + @inlinable + internal func fill( + untilFirstDuplicateIn elements: C + ) -> (success: Bool, end: C.Index) + where C.Element: Hashable { + assertMutable() + assert(elements.count <= capacity) + // Iterate over elements and insert their offset into the hash table. + var offset = 0 + for index in elements.indices { + // Find the insertion position. We know that we're inserting a new item, + // so there is no need to compare it with any of the existing ones. + var it = bucketIterator(for: elements[index]) + while let offset = it.currentValue { + guard elements[_offset: offset] != elements[index] else { + return (false, index) + } + it.advance() + } + it.currentValue = offset + offset += 1 + } + return (true, elements.endIndex) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable.swift b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable.swift new file mode 100644 index 0000000..dd4befb --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/HashTable/_HashTable.swift @@ -0,0 +1,190 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +@usableFromInline +@frozen +internal struct _HashTable { + @usableFromInline + internal var _storage: Storage + + @inlinable + @inline(__always) + internal init(_ storage: Storage) { + _storage = storage + } +} + +extension _HashTable { + /// A class holding hash table storage for a `OrderedSet` collection. + /// Values in the hash table are offsets into separate element storage, so + /// this class doesn't need to be generic over `OrderedSet`'s `Element` type. + @usableFromInline + internal final class Storage + : ManagedBuffer + {} +} + +extension _HashTable { + /// Allocate a new empty hash table buffer of the specified scale. + @usableFromInline + @_effects(releasenone) + internal init(scale: Int, reservedScale: Int = 0) { + assert(scale >= Self.minimumScale && scale <= Self.maximumScale) + let wordCount = Self.wordCount(forScale: scale) + let storage = Storage.create( + minimumCapacity: wordCount, + makingHeaderWith: { object in + #if COLLECTIONS_DETERMINISTIC_HASHING + let seed = scale << 6 + #else + let seed = Int(bitPattern: Unmanaged.passUnretained(object).toOpaque()) + #endif + return Header(scale: scale, reservedScale: reservedScale, seed: seed) + }) + storage.withUnsafeMutablePointerToElements { elements in + elements.initialize(repeating: 0, count: wordCount) + } + self.init(unsafeDowncast(storage, to: Storage.self)) + } + + /// Populate a new hash table with data from `elements`. + /// + /// - Parameter scale: The desired hash table scale or nil to use the minimum scale that satisfies invariants. 
+ /// - Parameter reservedScale: The reserved scale to remember in the returned storage. + /// - Parameter duplicates: The strategy to use to handle duplicate items. + /// - Returns: `(storage, index)` where `storage` is a storage instance. The contents of `storage` reflects all elements in `contents[contents.startIndex ..< index]`. `index` is usually `contents.endIndex`, except when the function was asked to reject duplicates, in which case `index` addresses the first duplicate element in `contents` (if any). + @inlinable + @inline(never) + @_effects(releasenone) + static func create( + uncheckedUniqueElements elements: C, + scale: Int? = nil, + reservedScale: Int = 0 + ) -> _HashTable? + where C.Element: Hashable { + let minScale = Self.scale(forCapacity: elements.count) + let scale = Swift.max(Swift.max(scale ?? 0, minScale), + reservedScale) + if scale < Self.minimumScale { return nil } + let hashTable = Self(scale: scale, reservedScale: reservedScale) + hashTable.update { handle in + handle.fill(uncheckedUniqueElements: elements) + } + return hashTable + } + + /// Populate a new hash table with data from `elements`. + /// + /// - Parameter scale: The desired hash table scale or nil to use the minimum scale that satisfies invariants. + /// - Parameter reservedScale: The reserved scale to remember in the returned storage. + /// - Parameter duplicates: The strategy to use to handle duplicate items. + /// - Returns: `(storage, index)` where `storage` is a storage instance. The contents of `storage` reflects all elements in `contents[contents.startIndex ..< index]`. `index` is usually `contents.endIndex`, except when the function was asked to reject duplicates, in which case `index` addresses the first duplicate element in `contents` (if any). + @inlinable + @inline(never) + @_effects(releasenone) + static func create( + untilFirstDuplicateIn elements: C, + scale: Int? = nil, + reservedScale: Int = 0 + ) -> (hashTable: _HashTable?, end: C.Index) + where C.Element: Hashable { + let minScale = Self.scale(forCapacity: elements.count) + let scale = Swift.max(Swift.max(scale ?? 0, minScale), + reservedScale) + if scale < Self.minimumScale { + // Don't hash anything. + if elements.count < 2 { return (nil, elements.endIndex) } + var temp: [C.Element] = [] + temp.reserveCapacity(elements.count) + for i in elements.indices { + let item = elements[i] + guard !temp.contains(item) else { return (nil, i) } + temp.append(item) + } + return (nil, elements.endIndex) + } + let hashTable = Self(scale: scale, reservedScale: reservedScale) + let (_, index) = hashTable.update { handle in + handle.fill(untilFirstDuplicateIn: elements) + } + return (hashTable, index) + } + + /// Create and return a new copy of this instance. The result has the same + /// scale and seed, and contains the exact same bucket data as the original instance. + @usableFromInline + @_effects(releasenone) + internal func copy() -> _HashTable { + self.read { handle in + let wordCount = handle.wordCount + let new = Storage.create( + minimumCapacity: wordCount, + makingHeaderWith: { _ in handle._header.pointee }) + new.withUnsafeMutablePointerToElements { elements in + elements.initialize(from: handle._buckets, count: wordCount) + } + return Self(unsafeDowncast(new, to: Storage.self)) + } + } +} + + + +extension _HashTable { + /// Call `body` with a hash table handle suitable for read-only use. + /// + /// - Warning: The handle supplied to `body` is only valid for the duration of + /// the closure call. 
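// Hedged sketch of the closure-scoped handle pattern that `read(_:)` and
// `update(_:)` use here: pointers vended by `withUnsafeMutablePointers` are
// only valid inside the closure, so the wrapping handle must never escape it.
// `CounterStorage`, `CounterHandle`, and `withCounterHandle` are hypothetical
// names, not part of the vendored sources.
final class CounterStorage: ManagedBuffer<Int, UInt64> {}

struct CounterHandle {
    let header: UnsafeMutablePointer<Int>
    let words: UnsafeMutablePointer<UInt64>
}

func withCounterHandle<R>(
    _ storage: CounterStorage,
    _ body: (CounterHandle) throws -> R
) rethrows -> R {
    try storage.withUnsafeMutablePointers { header, words in
        // The handle is valid only for the duration of this call.
        try body(CounterHandle(header: header, words: words))
    }
}

let storage = unsafeDowncast(
    CounterStorage.create(minimumCapacity: 4) { _ in 0 },
    to: CounterStorage.self)
let firstWord = withCounterHandle(storage) { handle -> UInt64 in
    handle.words.initialize(repeating: 0, count: 4)
    return handle.words[0]
}
assert(firstWord == 0)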
The closure must not escape it outside the call. + @inlinable + @inline(__always) + internal func read(_ body: (_UnsafeHashTable) throws -> R) rethrows -> R { + try _storage.withUnsafeMutablePointers { header, elements in + let handle = _UnsafeHashTable(header: header, buckets: elements, readonly: true) + return try body(handle) + } + } + + /// Call `body` with a hash table handle suitable for mutating use. + /// + /// - Warning: The handle supplied to `body` is only valid for the duration of + /// the closure call. The closure must not escape it outside the call. + @inlinable + @inline(__always) + internal func update(_ body: (_UnsafeHashTable) throws -> R) rethrows -> R { + try _storage.withUnsafeMutablePointers { header, elements in + let handle = _UnsafeHashTable(header: header, buckets: elements, readonly: false) + return try body(handle) + } + } +} + +extension _HashTable { + @inlinable + internal var capacity: Int { + _storage.header.capacity + } + + @inlinable + internal var scale: Int { + _storage.header.scale + } + + @inlinable + internal var reservedScale: Int { + _storage.header.reservedScale + } +} diff --git a/Sources/MultipartKit/OrderedCollections/HashTable/_Hashtable+Header.swift b/Sources/MultipartKit/OrderedCollections/HashTable/_Hashtable+Header.swift new file mode 100644 index 0000000..b53eff1 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/HashTable/_Hashtable+Header.swift @@ -0,0 +1,105 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension _HashTable { + /// The storage header for hash table buffers. + /// + /// Note that we don't store the number of items currently in the table; + /// that information can be easily retrieved from the element storage. + @usableFromInline + internal struct Header { + /// We are packing the scale data into the lower bits of the seed & bias + /// to saves a bit of space that would be otherwise taken up by padding. 
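// Hedged sketch of the 6-bit packing just described: the scale occupies the
// low 6 bits of a 64-bit word and the seed the remaining bits.
// `PackedScaleAndSeed` is a hypothetical stand-in, not the vendored `Header`.
struct PackedScaleAndSeed {
    private var word: UInt64

    init(seed: Int, scale: Int) {
        precondition(scale >= 0 && scale < 64, "scale must fit in 6 bits")
        word = (UInt64(truncatingIfNeeded: seed) &<< 6) | UInt64(scale)
    }

    var scale: Int { Int(word & 0x3F) }

    /// Recovers the seed by arithmetic-shifting the scale bits back out.
    var seed: Int { Int(truncatingIfNeeded: Int64(bitPattern: word) >> 6) }
}

let packed = PackedScaleAndSeed(seed: 0x1234_5678, scale: 10)
assert(packed.scale == 10)
assert(packed.seed == 0x1234_5678)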
+ /// + /// Layout: + /// + /// 63 6 5 0 + /// ├──────────────────────────────────────────────┼────────┤ + /// │ seed │ scale │ + /// └──────────────────────────────────────────────┴────────┘ + /// 63 6 5 0 + /// ├──────────────────────────────────────────────┼────────┤ + /// │ bias │ rsvd │ + /// └──────────────────────────────────────────────┴────────┘ + @usableFromInline + var _scaleAndSeed: UInt64 + @usableFromInline + var _reservedScaleAndBias: UInt64 + + init(scale: Int, reservedScale: Int, seed: Int) { + assert(scale >= _HashTable.minimumScale && scale <= _HashTable.maximumScale) + assert(reservedScale >= 0 && reservedScale <= _HashTable.maximumScale) + _scaleAndSeed = UInt64(truncatingIfNeeded: seed) << (Swift.max(UInt64.bitWidth - Int.bitWidth, 6)) + _scaleAndSeed &= ~0x3F + _scaleAndSeed |= UInt64(truncatingIfNeeded: scale) + _reservedScaleAndBias = UInt64(truncatingIfNeeded: reservedScale) + assert(self.scale == scale) + assert(self.reservedScale == reservedScale) + assert(self.bias == 0) + } + + /// The scale of the hash table. A table of scale *n* holds 2^*n* buckets, + /// each of which contain an *n*-bit value. + @inlinable + @inline(__always) + var scale: Int { Int(_scaleAndSeed & 0x3F) } + + /// The scale corresponding to the last call to `reserveCapacity`. + /// We remember this here to make sure we don't shrink the table below its reserved size. + @inlinable + var reservedScale: Int { + @inline(__always) + get { Int(_reservedScaleAndBias & 0x3F) } + set { + assert(newValue >= 0 && newValue < 64) + _reservedScaleAndBias &= ~0x3F + _reservedScaleAndBias |= UInt64(truncatingIfNeeded: newValue) & 0x3F + } + } + + /// The hasher seed to use within this hash table. + @inlinable + @inline(__always) + var seed: Int { + Int(truncatingIfNeeded: _scaleAndSeed) + } + + /// A bias value that needs to be added to buckets to convert them into offsets + /// into element storage. (This allows O(1) insertions at the front when the + /// underlying storage supports it.) + @inlinable + var bias: Int { + @inline(__always) + get { Int(truncatingIfNeeded: _reservedScaleAndBias) &>> 6 } + set { + let limit = (1 &<< scale) - 1 + var bias = newValue + if bias < 0 { bias += limit } + if bias >= limit { bias -= limit } + assert(bias >= 0 && bias < limit) + _reservedScaleAndBias &= 0x3F + _reservedScaleAndBias |= UInt64(truncatingIfNeeded: bias) &<< 6 + assert(self.bias >= 0 && self.bias < limit) + } + } + + /// The maximum number of items that can fit into this table. + @inlinable + @inline(__always) + var capacity: Int { _HashTable.maximumCapacity(forScale: scale) } + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Equatable.swift b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Equatable.swift new file mode 100644 index 0000000..3d909fd --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Equatable.swift @@ -0,0 +1,29 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedDictionary: Equatable where Value: Equatable { + /// Returns a Boolean value indicating whether two values are equal. + /// + /// Two ordered dictionaries are considered equal if they contain the same + /// key-value pairs, in the same order. + /// + /// - Complexity: O(`min(left.count, right.count)`) + @inlinable + internal static func ==(left: Self, right: Self) -> Bool { + left._keys == right._keys && left._values == right._values + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+ExpressibleByDictionaryLiteral.swift b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+ExpressibleByDictionaryLiteral.swift new file mode 100644 index 0000000..686c68f --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+ExpressibleByDictionaryLiteral.swift @@ -0,0 +1,40 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedDictionary: ExpressibleByDictionaryLiteral { + /// Creates a new ordered dictionary from the contents of a dictionary + /// literal. + /// + /// Duplicate elements in the literal are allowed, but the resulting + /// set will only contain the first occurrence of each. + /// + /// Do not call this initializer directly. It is used by the compiler when you + /// use a dictionary literal. Instead, create a new ordered dictionary using a + /// dictionary literal as its value by enclosing a comma-separated list of + /// values in square brackets. You can use an array literal anywhere a set is + /// expected by the type context. + /// + /// - Parameter elements: A variadic list of key-value pairs for the new + /// dictionary. + /// + /// - Complexity: O(`elements.count`) if `Key` implements + /// high-quality hashing. + @inlinable + internal init(dictionaryLiteral elements: (Key, Value)...) { + self.init(uniqueKeysWithValues: elements) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Initializers.swift b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Initializers.swift new file mode 100644 index 0000000..9bf6f50 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Initializers.swift @@ -0,0 +1,115 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedDictionary { + /// Creates an empty dictionary. + /// + /// This initializer is equivalent to initializing with an empty dictionary + /// literal. + /// + /// - Complexity: O(1) + @inlinable + @inline(__always) + internal init() { + self._keys = OrderedSet() + self._values = [] + } +} + +extension OrderedDictionary { + /// Creates a new dictionary from the key-value pairs in the given sequence. + /// + /// You use this initializer to create a dictionary when you have a sequence + /// of key-value tuples with unique keys. Passing a sequence with duplicate + /// keys to this initializer results in a runtime error. If your + /// sequence might have duplicate keys, use the + /// `Dictionary(_:uniquingKeysWith:)` initializer instead. + /// + /// - Parameter keysAndValues: A sequence of key-value pairs to use for + /// the new dictionary. Every key in `keysAndValues` must be unique. + /// + /// - Returns: A new dictionary initialized with the elements of + /// `keysAndValues`. + /// + /// - Precondition: The sequence must not have duplicate keys. + /// + /// - Complexity: Expected O(*n*) on average, where *n* is the count if + /// key-value pairs, if `Key` implements high-quality hashing. + @inlinable + internal init( + uniqueKeysWithValues keysAndValues: S + ) where S.Element == (key: Key, value: Value) { + if S.self == Dictionary.self { + self.init(_uncheckedUniqueKeysWithValues: keysAndValues) + return + } + self.init() + reserveCapacity(keysAndValues.underestimatedCount) + for (key, value) in keysAndValues { + guard _keys._append(key).inserted else { + preconditionFailure("Duplicate key: '\(key)'") + } + _values.append(value) + } + } + + /// Creates a new dictionary from the key-value pairs in the given sequence. + /// + /// You use this initializer to create a dictionary when you have a sequence + /// of key-value tuples with unique keys. Passing a sequence with duplicate + /// keys to this initializer results in a runtime error. If your + /// sequence might have duplicate keys, use the + /// `Dictionary(_:uniquingKeysWith:)` initializer instead. + /// + /// - Parameter keysAndValues: A sequence of key-value pairs to use for + /// the new dictionary. Every key in `keysAndValues` must be unique. + /// + /// - Returns: A new dictionary initialized with the elements of + /// `keysAndValues`. + /// + /// - Precondition: The sequence must not have duplicate keys. + /// + /// - Complexity: Expected O(*n*) on average, where *n* is the count if + /// key-value pairs, if `Key` implements high-quality hashing. 
+ @inlinable + internal init( + uniqueKeysWithValues keysAndValues: S + ) where S.Element == (Key, Value) { + self.init() + reserveCapacity(keysAndValues.underestimatedCount) + for (key, value) in keysAndValues { + guard _keys._append(key).inserted else { + preconditionFailure("Duplicate key: '\(key)'") + } + _values.append(value) + } + } +} + +extension OrderedDictionary { + @inlinable + internal init( + _uncheckedUniqueKeysWithValues keysAndValues: S + ) where S.Element == (key: Key, value: Value) { + self.init() + reserveCapacity(keysAndValues.underestimatedCount) + for (key, value) in keysAndValues { + _keys._appendNew(key) + _values.append(value) + } + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Partial RangeReplaceableCollection.swift b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Partial RangeReplaceableCollection.swift new file mode 100644 index 0000000..133ea28 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Partial RangeReplaceableCollection.swift @@ -0,0 +1,43 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +// The parts of RangeReplaceableCollection that OrderedDictionary is able to implement. + +extension OrderedDictionary { + /// Reserves enough space to store the specified number of elements. + /// + /// This method ensures that the dictionary has unique, mutable, contiguous + /// storage, with space allocated for at least the requested number of + /// elements. + /// + /// If you are adding a known number of elements to a dictionary, call this + /// method once before the first insertion to avoid multiple reallocations. + /// + /// Do not call this method in a loop -- it does not use an exponential + /// allocation strategy, so doing that can result in quadratic instead of + /// linear performance. + /// + /// - Parameter minimumCapacity: The minimum number of elements that the + /// dictionary should be able to store without reallocating its storage. + /// + /// - Complexity: O(`max(count, minimumCapacity)`) + @inlinable + internal mutating func reserveCapacity(_ minimumCapacity: Int) { + self._keys.reserveCapacity(minimumCapacity) + self._values.reserveCapacity(minimumCapacity) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Sequence.swift b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Sequence.swift new file mode 100644 index 0000000..ed3758b --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary+Sequence.swift @@ -0,0 +1,68 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. 
+*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedDictionary: Sequence { + /// The element type of a dictionary: a tuple containing an individual + /// key-value pair. + internal typealias Element = (key: Key, value: Value) + + /// The type that allows iteration over an ordered dictionary's elements. + internal struct Iterator: IteratorProtocol { + @usableFromInline + internal let _base: OrderedDictionary + + @usableFromInline + internal var _position: Int + + @inlinable + @inline(__always) + internal init(_base: OrderedDictionary) { + self._base = _base + self._position = 0 + } + + /// Advances to the next element and returns it, or nil if no next + /// element exists. + /// + /// - Complexity: O(1) + @inlinable + internal mutating func next() -> Element? { + guard _position < _base._values.count else { return nil } + let result = (_base._keys[_position], _base._values[_position]) + _position += 1 + return result + } + } + + /// The number of elements in the collection. + /// + /// - Complexity: O(1) + @inlinable + @inline(__always) + internal var underestimatedCount: Int { + count + } + + /// Returns an iterator over the elements of this collection. + /// + /// - Complexity: O(1) + @inlinable + @inline(__always) + internal func makeIterator() -> Iterator { + Iterator(_base: self) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary.swift b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary.swift new file mode 100644 index 0000000..c93f113 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedDictionary/OrderedDictionary.swift @@ -0,0 +1,470 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +/// An ordered collection of key-value pairs. +/// +/// Like the standard `Dictionary`, ordered dictionaries use a hash table to +/// ensure that no two entries have the same keys, and to efficiently look up +/// values corresponding to specific keys. However, like an `Array` (and +/// unlike `Dictionary`), ordered dictionaries maintain their elements in a +/// particular user-specified order, and they support efficient random-access +/// traversal of their entries. +/// +/// `OrderedDictionary` is a useful alternative to `Dictionary` when the order +/// of elements is important, or when you need to be able to efficiently access +/// elements at various positions within the collection. +/// +/// You can create an ordered dictionary with any key type that conforms to the +/// `Hashable` protocol. 
+/// +/// let responses: OrderedDictionary = [ +/// 200: "OK", +/// 403: "Access forbidden", +/// 404: "File not found", +/// 500: "Internal server error", +/// ] +/// +/// ### Equality of Ordered Dictionaries +/// +/// Two ordered dictionaries are considered equal if they contain the same +/// elements, and *in the same order*. This matches the concept of equality of +/// an `Array`, and it is different from the unordered `Dictionary`. +/// +/// let a: OrderedDictionary = [1: "one", 2: "two"] +/// let b: OrderedDictionary = [2: "two", 1: "one"] +/// a == b // false +/// b.swapAt(0, 1) // `b` now has value [1: "one", 2: "two"] +/// a == b // true +/// +/// (`OrderedDictionary` only conforms to `Equatable` when its `Value` is +/// equatable.) +/// +/// ### Dictionary Operations +/// +/// `OrderedDictionary` provides many of the same operations as `Dictionary`. +/// +/// For example, you can look up and add/remove values using the familiar +/// key-based subscript, returning an optional value: +/// +/// var dictionary: OrderedDictionary = [:] +/// dictionary["one"] = 1 +/// dictionary["two"] = 2 +/// dictionary["three"] // nil +/// // dictionary is now ["one": 1, "two": 2] +/// +/// If a new entry is added using the subscript setter, it gets appended to the +/// end of the dictionary. (So that by default, the dictionary contains its +/// elements in the order they were originally inserted.) +/// +/// `OrderedDictionary` also implements the variant of this subscript that takes +/// a default value. Like with `Dictionary`, this is useful when you want to +/// perform in-place mutations on values: +/// +/// let text = "short string" +/// var counts: OrderedDictionary = [:] +/// for character in text { +/// counts[character, default: 0] += 1 +/// } +/// // counts is ["s": 2, "h": 1, "o": 1, +/// // "r": 2, "t": 2, " ": 1, +/// // "i": 1, "n": 1, "g": 1] +/// +/// If the `Value` type implements reference semantics, or when you need to +/// perform a series of individual mutations on the values, the closure-based +/// `modifyValue(forKey:default:_:)` method provides an easier-to-use +/// alternative to the defaulted key-based subscript. +/// +/// let text = "short string" +/// var counts: OrderedDictionary = [:] +/// for character in text { +/// counts.modifyValue(forKey: character, default: 0) { value in +/// value += 1 +/// } +/// } +/// // Same result as before +/// +/// (This isn't currently available on the regular `Dictionary`.) +/// +/// The `Dictionary` type's original `updateValue(_:forKey:)` method is also +/// available, and so is `index(forKey:)`, grouping/uniquing initializers +/// (`init(uniqueKeysWithValues:)`, `init(_:uniquingKeysWith:)`, +/// `init(grouping:by:)`), methods for merging one dictionary with another +/// (`merge`, `merging`), filtering dictionary entries (`filter(_:)`), +/// transforming values (`mapValues(_:)`), and a combination of these two +/// (`compactMapValues(_:)`). +/// +/// ### Sequence and Collection Operations +/// +/// Ordered dictionaries use integer indices representing offsets from the +/// beginning of the collection. However, to avoid ambiguity between key-based +/// and indexing subscripts, `OrderedDictionary` doesn't directly conform to +/// `Collection`. 
Instead, it only conforms to `Sequence`, and provides a +/// random-access collection view over its key-value pairs: +/// +/// responses[0] // `nil` (key-based subscript) +/// responses.elements[0] // `(200, "OK")` (index-based subscript) +/// +/// Because ordered dictionaries need to maintain unique keys, neither +/// `OrderedDictionary` nor its `elements` view can conform to the full +/// `MutableCollection` or `RangeReplaceableCollection` protocols. However, they +/// are able to partially implement requirements: they support mutations +/// that merely change the order of elements, or just remove a subset of +/// existing members: +/// +/// // Permutation operations from MutableCollection: +/// func swapAt(_ i: Int, _ j: Int) +/// func partition(by predicate: (Element) throws -> Bool) -> rethrows Int +/// func sort() where Element: Comparable +/// func sort(by predicate: (Element, Element) throws -> Bool) rethrows +/// func shuffle() +/// func shuffle(using generator: inout T) +/// +/// // Removal operations from RangeReplaceableCollection: +/// func removeAll(keepingCapacity: Bool = false) +/// func remove(at index: Int) -> Element +/// func removeSubrange(_ bounds: Range) +/// func removeLast() -> Element +/// func removeLast(_ n: Int) +/// func removeFirst() -> Element +/// func removeFirst(_ n: Int) +/// func removeAll(where shouldBeRemoved: (Element) throws -> Bool) rethrows +/// +/// `OrderedDictionary` also implements `reserveCapacity(_)` from +/// `RangeReplaceableCollection`, to allow for efficient insertion of a known +/// number of elements. (However, unlike `Array` and `Dictionary`, +/// `OrderedDictionary` does not provide a `capacity` property.) +/// +/// ### Keys and Values Views +/// +/// Like the standard `Dictionary`, `OrderedDictionary` provides `keys` and +/// `values` properties that provide lightweight views into the corresponding +/// parts of the dictionary. +/// +/// The `keys` collection is of type `OrderedSet`, containing all the keys +/// in the original dictionary. +/// +/// let d: OrderedDictionary = [2: "two", 1: "one", 0: "zero"] +/// d.keys // [2, 1, 0] as OrderedSet +/// +/// The `keys` property is read-only, so you cannot mutate the dictionary +/// through it. However, it returns an ordinary ordered set value, which can be +/// copied out and then mutated if desired. (Such mutations won't affect the +/// original dictionary value.) +/// +/// The `values` collection is a mutable random-access collection of the values +/// in the dictionary: +/// +/// d.values // "two", "one", "zero" +/// d.values[2] = "nada" +/// // `d` is now [2: "two", 1: "one", 0: "nada"] +/// d.values.sort() +/// // `d` is now [2: "nada", 1: "one", 0: "two"] +/// +/// Both views store their contents in regular `Array` values, accessible +/// through their `elements` property. +/// +/// ## Performance +/// +/// Like the standard `Dictionary` type, the performance of hashing operations +/// in `OrderedDictionary` is highly sensitive to the quality of hashing +/// implemented by the `Key` type. Failing to correctly implement hashing can +/// easily lead to unacceptable performance, with the severity of the effect +/// increasing with the size of the hash table. +/// +/// In particular, if a certain set of keys all produce the same hash value, +/// then hash table lookups regress to searching an element in an unsorted +/// array, i.e., a linear operation. 
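// Hedged illustration of the hashing-quality point above (hypothetical types,
// not part of the vendored sources): a `hash(into:)` that ignores the key's
// contents funnels every key into one collision chain, so lookups degrade to
// a linear scan, while the synthesized conformance combines all compared
// state and keeps lookups at the expected O(1).
struct PoorlyHashedKey: Hashable {
    let id: Int
    static func == (lhs: Self, rhs: Self) -> Bool { lhs.id == rhs.id }
    func hash(into hasher: inout Hasher) {
        hasher.combine(0) // deliberately constant: every key collides
    }
}

struct WellHashedKey: Hashable {
    let id: Int // synthesized ==/hash(into:) combine `id`, giving good distribution
}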
To ensure hashed collection types exhibit +/// their target performance, it is important to ensure that such collisions +/// cannot be induced merely by adding a particular list of keys to the +/// dictionary. +/// +/// The easiest way to achieve this is to make sure `Key` implements hashing +/// following `Hashable`'s documented best practices. The conformance must +/// implement the `hash(into:)` requirement, and every bit of information that +/// is compared in `==` needs to be combined into the supplied `Hasher` value. +/// When used correctly, `Hasher` produces high-quality, randomly seeded hash +/// values that prevent repeatable hash collisions. +/// +/// When `Key` correctly conforms to `Hashable`, key-based lookups in an ordered +/// dictionary is expected to take O(1) equality checks on average. Hash +/// collisions can still occur organically, so the worst-case lookup performance +/// is technically still O(*n*) (where *n* is the size of the dictionary); +/// however, long lookup chains are unlikely to occur in practice. +/// +/// ## Implementation Details +/// +/// An ordered dictionary consists of an ordered set of keys, alongside a +/// regular `Array` value that contains their associated values. +internal struct OrderedDictionary { + @usableFromInline + internal var _keys: OrderedSet + + @usableFromInline + internal var _values: ContiguousArray + + @inlinable + @inline(__always) + internal init( + _uniqueKeys keys: OrderedSet, + values: ContiguousArray + ) { + self._keys = keys + self._values = values + } +} + +extension OrderedDictionary { + /// A read-only collection view for the keys contained in this dictionary, as + /// an `OrderedSet`. + /// + /// - Complexity: O(1) + @inlinable + @inline(__always) + internal var keys: OrderedSet { _keys } +} + +extension OrderedDictionary { + /// The number of elements in the dictionary. + /// + /// - Complexity: O(1) + @inlinable + @inline(__always) + internal var count: Int { _values.count } +} + +extension OrderedDictionary { + /// Accesses the value associated with the given key for reading and writing. + /// + /// This *key-based* subscript returns the value for the given key if the key + /// is found in the dictionary, or `nil` if the key is not found. + /// + /// The following example creates a new dictionary and prints the value of a + /// key found in the dictionary (`"Coral"`) and a key not found in the + /// dictionary (`"Cerise"`). + /// + /// var hues: OrderedDictionary = ["Heliotrope": 296, "Coral": 16, "Aquamarine": 156] + /// print(hues["Coral"]) + /// // Prints "Optional(16)" + /// print(hues["Cerise"]) + /// // Prints "nil" + /// + /// When you assign a value for a key and that key already exists, the + /// dictionary overwrites the existing value. If the dictionary doesn't + /// contain the key, the key and value are added as a new key-value pair. + /// + /// Here, the value for the key `"Coral"` is updated from `16` to `18` and a + /// new key-value pair is added for the key `"Cerise"`. + /// + /// hues["Coral"] = 18 + /// print(hues["Coral"]) + /// // Prints "Optional(18)" + /// + /// hues["Cerise"] = 330 + /// print(hues["Cerise"]) + /// // Prints "Optional(330)" + /// + /// If you assign `nil` as the value for the given key, the dictionary + /// removes that key and its associated value. + /// + /// In the following example, the key-value pair for the key `"Aquamarine"` + /// is removed from the dictionary by assigning `nil` to the key-based + /// subscript. 
+ /// + /// hues["Aquamarine"] = nil + /// print(hues) + /// // Prints "["Coral": 18, "Heliotrope": 296, "Cerise": 330]" + /// + /// - Parameter key: The key to find in the dictionary. + /// + /// - Returns: The value associated with `key` if `key` is in the dictionary; + /// otherwise, `nil`. + /// + /// - Complexity: Looking up values in the dictionary through this subscript + /// has an expected complexity of O(1) hashing/comparison operations on + /// average, if `Key` implements high-quality hashing. Updating the + /// dictionary also has an amortized expected complexity of O(1) -- + /// although individual updates may need to copy or resize the dictionary's + /// underlying storage. + @inlinable + internal subscript(key: Key) -> Value? { + get { + guard let index = _keys.firstIndex(of: key) else { return nil } + return _values[index] + } + set { + // We have a separate `set` in addition to `_modify` in hopes of getting + // rid of `_modify`'s swapAt dance in the usua case where the calle just + // wants to assign a new value. + let (index, bucket) = _keys._find(key) + switch (index, newValue) { + case let (index?, newValue?): // Assign + _values[index] = newValue + case let (index?, nil): // Remove + _keys._removeExistingMember(at: index, in: bucket) + _values.remove(at: index) + case let (nil, newValue?): // Insert + _keys._appendNew(key, in: bucket) + _values.append(newValue) + case (nil, nil): // Noop + break + } + } + _modify { + let (index, bucket) = _keys._find(key) + + // To support in-place mutations better, we swap the value to the end of + // the array, pop it off, then put things back in place when we're done. + var value: Value? = nil + if let index = index { + _values.swapAt(index, _values.count - 1) + value = _values.removeLast() + } + + defer { + switch (index, value) { + case let (index?, value?): // Assign + _values.append(value) + _values.swapAt(index, _values.count - 1) + case let (index?, nil): // Remove + if index < _values.count { + let standin = _values.remove(at: index) + _values.append(standin) + } + _keys._removeExistingMember(at: index, in: bucket) + case let (nil, value?): // Insert + _keys._appendNew(key, in: bucket) + _values.append(value) + case (nil, nil): // Noop + break + } + } + + yield &value + } + } + + /// Accesses the value with the given key. If the dictionary doesn't contain + /// the given key, accesses the provided default value as if the key and + /// default value existed in the dictionary. + /// + /// Use this subscript when you want either the value for a particular key + /// or, when that key is not present in the dictionary, a default value. This + /// example uses the subscript with a message to use in case an HTTP response + /// code isn't recognized: + /// + /// var responseMessages: OrderedDictionary = [ + /// 200: "OK", + /// 403: "Access forbidden", + /// 404: "File not found", + /// 500: "Internal server error"] + /// + /// let httpResponseCodes = [200, 403, 301] + /// for code in httpResponseCodes { + /// let message = responseMessages[code, default: "Unknown response"] + /// print("Response \(code): \(message)") + /// } + /// // Prints "Response 200: OK" + /// // Prints "Response 403: Access forbidden" + /// // Prints "Response 301: Unknown response" + /// + /// When a dictionary's `Value` type has value semantics, you can use this + /// subscript to perform in-place operations on values in the dictionary. 
+ /// The following example uses this subscript while counting the occurrences + /// of each letter in a string: + /// + /// let message = "Hello, Elle!" + /// var letterCounts: [Character: Int] = [:] + /// for letter in message { + /// letterCounts[letter, default: 0] += 1 + /// } + /// // letterCounts == ["H": 1, "e": 2, "l": 4, "o": 1, ...] + /// + /// When `letterCounts[letter, defaultValue: 0] += 1` is executed with a + /// value of `letter` that isn't already a key in `letterCounts`, the + /// specified default value (`0`) is returned from the subscript, + /// incremented, and then added to the dictionary under that key. + /// + /// - Note: Do not use this subscript to modify dictionary values if the + /// dictionary's `Value` type is a class. In that case, the default value + /// and key are not written back to the dictionary after an operation. (For + /// a variant of this operation that supports this usecase, see + /// `updateValue(forKey:default:_:)`.) + /// + /// - Parameters: + /// - key: The key the look up in the dictionary. + /// - defaultValue: The default value to use if `key` doesn't exist in the + /// dictionary. + /// + /// - Returns: The value associated with `key` in the dictionary; otherwise, + /// `defaultValue`. + /// + /// - Complexity: Looking up values in the dictionary through this subscript + /// has an expected complexity of O(1) hashing/comparison operations on + /// average, if `Key` implements high-quality hashing. Updating the + /// dictionary also has an amortized expected complexity of O(1) -- + /// although individual updates may need to copy or resize the dictionary's + /// underlying storage. + @inlinable + internal subscript( + key: Key, + default defaultValue: @autoclosure () -> Value + ) -> Value { + get { + guard let offset = _keys.firstIndex(of: key) else { return defaultValue() } + return _values[offset] + } + _modify { + let (inserted, index) = _keys.append(key) + if inserted { + assert(index == _values.count) + _values.append(defaultValue()) + } + var value: Value = _values.withUnsafeMutableBufferPointer { buffer in + assert(index < buffer.count) + return (buffer.baseAddress! + index).move() + } + defer { + _values.withUnsafeMutableBufferPointer { buffer in + assert(index < buffer.count) + (buffer.baseAddress! + index).initialize(to: value) + } + } + yield &value + } + } +} + +extension OrderedDictionary { + /// Returns a new dictionary containing the keys of this dictionary with the + /// values transformed by the given closure. + /// + /// - Parameter transform: A closure that transforms a value. `transform` + /// accepts each value of the dictionary as its parameter and returns a + /// transformed value of the same or of a different type. + /// - Returns: A dictionary containing the keys and transformed values of + /// this dictionary. 
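// Hedged usage sketch of the dictionary operations documented above, written
// against the public `OrderedDictionary` from apple/swift-collections
// (`import OrderedCollections`); the vendored copy exposes the same subset of
// operations internally to MultipartKit.
import OrderedCollections

var headers: OrderedDictionary<String, String> = OrderedDictionary(
    uniqueKeysWithValues: [("Content-Type", "text/plain"), ("Content-Length", "42")])

headers["Connection"] = "keep-alive"          // new keys are appended at the end
assert(Array(headers.keys) == ["Content-Type", "Content-Length", "Connection"])

var counts: OrderedDictionary<Character, Int> = [:]
for character in "multipart" {
    counts[character, default: 0] += 1        // defaulted subscript, as documented above
}
assert(counts["t"] == 2)

let lengths = headers.mapValues { $0.count }  // keys keep their original order
assert(lengths["Content-Type"] == 10)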
+ /// + /// - Complexity: O(`count`) + @inlinable + internal func mapValues( + _ transform: (Value) throws -> T + ) rethrows -> OrderedDictionary { + OrderedDictionary( + _uniqueKeys: _keys, + values: ContiguousArray(try _values.map(transform))) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Equatable.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Equatable.swift new file mode 100644 index 0000000..58f6581 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Equatable.swift @@ -0,0 +1,29 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedSet: Equatable { + /// Returns a Boolean value indicating whether two values are equal. + /// + /// Two ordered sets are considered equal if they contain the same + /// elements in the same order. + /// + /// - Complexity: O(`min(left.count, right.count)`) + @inlinable + internal static func ==(left: Self, right: Self) -> Bool { + left.elementsEqual(right) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Insertions.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Insertions.swift new file mode 100644 index 0000000..c342a6f --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Insertions.swift @@ -0,0 +1,97 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedSet { + /// Append a new member to the end of the set, without verifying + /// that the set doesn't already contain it. + /// + /// This operation performs no hashing operations unless it needs to + /// reallocate the hash table. + /// + /// - Complexity: Expected to be O(1) on average if `Element` + /// implements high-quality hashing. + @inlinable + internal mutating func _appendNew(_ item: Element) { + assert(!contains(item)) + _elements.append(item) + guard _elements.count <= _capacity else { + _regenerateHashTable() + return + } + guard _table != nil else { return } + _ensureUnique() + _table!.update { hashTable in + var it = hashTable.bucketIterator(for: item) + it.advanceToNextUnoccupiedBucket() + it.currentValue = _elements.count - 1 + } + } + + /// Append a new member to the end of the set, registering it in the + /// specified hash table bucket, without verifying that the set + /// doesn't already contain it. 
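// Hedged usage sketch of the append semantics implemented above, written
// against the public `OrderedSet` from apple/swift-collections
// (`import OrderedCollections`); the vendored internal copy behaves the same way.
import OrderedCollections

var parts: OrderedSet<String> = ["alpha", "beta"]

let first = parts.append("gamma")
assert(first == (inserted: true, index: 2))   // new members go to the end

let second = parts.append("alpha")
assert(second == (inserted: false, index: 0)) // duplicates are rejected; the
                                              // existing member's position is reported

assert(Array(parts) == ["alpha", "beta", "gamma"])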
+ /// + /// This operation performs no hashing operations unless it needs to + /// reallocate the hash table. + /// + /// - Complexity: Amortized O(1) + @inlinable + internal mutating func _appendNew(_ item: Element, in bucket: _Bucket) { + _elements.append(item) + + guard _elements.count <= _capacity else { + _regenerateHashTable() + return + } + guard _table != nil else { return } + _ensureUnique() + _table!.update { hashTable in + assert(!hashTable.isOccupied(bucket)) + hashTable[bucket] = _elements.count - 1 + } + } + + @inlinable + @discardableResult + internal mutating func _append(_ item: Element) -> (inserted: Bool, index: Int) { + let (index, bucket) = _find(item) + if let index = index { return (false, index) } + _appendNew(item, in: bucket) + return (true, _elements.index(before: _elements.endIndex)) + } + + /// Append a new member to the end of the set, if the set doesn't + /// already contain it. + /// + /// - Parameter item: The element to add to the set. + /// + /// - Returns: A pair `(inserted, index)`, where `inserted` is a Boolean value + /// indicating whether the operation added a new element, and `index` is + /// the index of `item` in the resulting set. + /// + /// - Complexity: The operation is expected to perform O(1) copy, hash, and + /// compare operations on the `Element` type, if it implements high-quality + /// hashing. + @inlinable + @inline(__always) + @discardableResult + internal mutating func append(_ item: Element) -> (inserted: Bool, index: Int) { + let result = _append(item) + _checkInvariants() + return result + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Invariants.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Invariants.swift new file mode 100644 index 0000000..c7a21de --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Invariants.swift @@ -0,0 +1,60 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedSet { + #if COLLECTIONS_INTERNAL_CHECKS + @inlinable + @inline(never) @_effects(releasenone) + internal func _checkInvariants() { + if _table == nil { + precondition(_elements.count <= _HashTable.maximumUnhashedCount, + "Oversized set without a hash table") + precondition(Set(_elements).count == _elements.count, + "Duplicate items in set") + return + } + // Check that each element in _elements can be found in the hash table. + for index in _elements.indices { + let item = _elements[index] + let i = _find(item).index + precondition(i != nil, + "Index \(index) not found in hash table (element: \(item))") + precondition( + i == index, + "Offset of element '\(item)' in hash table differs from its position") + } + // Check that the hash table has exactly as many entries as there are elements. 
+ _table!.read { hashTable in + var it = hashTable.bucketIterator(startingAt: _Bucket(offset: 0)) + var c = 0 + repeat { + it.advance() + if it.isOccupied { c += 1 } + } while it.currentBucket.offset != 0 + precondition( + c == _elements.count, + """ + Number of entries in hash table (\(c)) differs + from number of elements (\(_elements.count)) + """) + } + } + #else + @inline(__always) @inlinable + internal func _checkInvariants() {} + #endif // COLLECTIONS_INTERNAL_CHECKS +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Partial SetAlgebra+Basics.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Partial SetAlgebra+Basics.swift new file mode 100644 index 0000000..3ff3dd6 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+Partial SetAlgebra+Basics.swift @@ -0,0 +1,39 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +// `OrderedSet` does not directly conform to `SetAlgebra` because its definition +// of equality conflicts with `SetAlgebra` requirements. However, it still +// implements most `SetAlgebra` requirements (except `insert`, which is replaced +// by `append`). +// +// `OrderedSet` also provides an `unordered` view that explicitly conforms to +// `SetAlgebra`. That view implements `Equatable` by ignoring element order, +// so it can satisfy `SetAlgebra` requirements. + +extension OrderedSet { + /// Creates an empty set. + /// + /// This initializer is equivalent to initializing with an empty array + /// literal. + /// + /// - Complexity: O(1) + @inlinable + internal init() { + __storage = nil + _elements = [] + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+RandomAccessCollection.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+RandomAccessCollection.swift new file mode 100644 index 0000000..a21d958 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+RandomAccessCollection.swift @@ -0,0 +1,264 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension OrderedSet: Sequence { + /// The type that allows iteration over an ordered set's elements. + internal typealias Iterator = IndexingIterator +} + +extension OrderedSet: RandomAccessCollection { + /// The index type for ordered sets, `Int`. 
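// Hedged usage sketch of the integer-offset indexing described here, written
// against the public `OrderedSet` from apple/swift-collections
// (`import OrderedCollections`); the vendored internal copy indexes the same way.
import OrderedCollections

let statusCodes: OrderedSet<Int> = [200, 301, 404, 500]
assert(statusCodes[0] == 200)                 // indices are offsets from the start
assert(statusCodes.firstIndex(of: 404) == 2)  // hash lookup yields the stored offset

let tail = statusCodes[2...]                  // slices keep the original indices
assert(tail.startIndex == 2 && tail.first == 404)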
+  ///
+  /// `OrderedSet` indices are integer offsets from the start of the collection,
+  /// starting at zero for the first element (if it exists).
+  internal typealias Index = Int
+
+  /// The type that represents the indices that are valid for subscripting an
+  /// ordered set, in ascending order.
+  internal typealias Indices = Range<Int>
+
+  // For SubSequence, see OrderedSet+SubSequence.swift.
+
+  /// The position of the first element in a nonempty ordered set.
+  ///
+  /// For an instance of `OrderedSet`, `startIndex` is always zero. If the set
+  /// is empty, `startIndex` is equal to `endIndex`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal var startIndex: Int { _elements.startIndex }
+
+  /// The set's "past the end" position---that is, the position one greater
+  /// than the last valid subscript argument.
+  ///
+  /// In an `OrderedSet`, `endIndex` always equals the count of elements.
+  /// If the set is empty, `endIndex` is equal to `startIndex`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal var endIndex: Int { _elements.endIndex }
+
+  /// The indices that are valid for subscripting the collection, in ascending
+  /// order.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal var indices: Indices { _elements.indices }
+
+  /// Returns the position immediately after the given index.
+  ///
+  /// The specified index must be a valid index less than `endIndex`, or the
+  /// returned value won't be a valid index in the set.
+  ///
+  /// - Parameter i: A valid index of the collection.
+  ///
+  /// - Returns: The index immediately after `i`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func index(after i: Int) -> Int { i + 1 }
+
+  /// Returns the position immediately before the given index.
+  ///
+  /// The specified index must be a valid index greater than `startIndex`, or
+  /// the returned value won't be a valid index in the set.
+  ///
+  /// - Parameter i: A valid index of the collection.
+  ///
+  /// - Returns: The index immediately before `i`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func index(before i: Int) -> Int { i - 1 }
+
+  /// Replaces the given index with its successor.
+  ///
+  /// The specified index must be a valid index less than `endIndex`, or the
+  /// returned value won't be a valid index in the set.
+  ///
+  /// - Parameter i: A valid index of the collection.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func formIndex(after i: inout Int) { i += 1 }
+
+  /// Replaces the given index with its predecessor.
+  ///
+  /// The specified index must be a valid index greater than `startIndex`, or
+  /// the returned value won't be a valid index in the set.
+  ///
+  /// - Parameter i: A valid index of the collection.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func formIndex(before i: inout Int) { i -= 1 }
+
+  /// Returns an index that is the specified distance from the given index.
+  ///
+  /// The value passed as `distance` must not offset `i` beyond the bounds of
+  /// the collection, or the returned value will not be a valid index.
+  ///
+  /// - Parameters:
+  ///   - i: A valid index of the set.
+  ///   - distance: The distance to offset `i`.
+  ///
+  /// - Returns: An index offset by `distance` from the index `i`. If `distance`
+  ///   is positive, this is the same value as the result of `distance` calls to
+  ///   `index(after:)`. If `distance` is negative, this is the same value as
+  ///   the result of `abs(distance)` calls to `index(before:)`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func index(_ i: Int, offsetBy distance: Int) -> Int {
+    i + distance
+  }
+
+  /// Returns an index that is the specified distance from the given index,
+  /// unless that distance is beyond a given limiting index.
+  ///
+  /// The value passed as `distance` must not offset `i` beyond the bounds of
+  /// the collection, unless the index passed as `limit` prevents offsetting
+  /// beyond those bounds. (Otherwise the returned value won't be a valid index
+  /// in the set.)
+  ///
+  /// - Parameters:
+  ///   - i: A valid index of the set.
+  ///   - distance: The distance to offset `i`.
+  ///   - limit: A valid index of the collection to use as a limit. If
+  ///     `distance > 0`, `limit` has no effect if it is less than `i`.
+  ///     Likewise, if `distance < 0`, `limit` has no effect if it is greater
+  ///     than `i`.
+  /// - Returns: An index offset by `distance` from the index `i`, unless that
+  ///   index would be beyond `limit` in the direction of movement. In that
+  ///   case, the method returns `nil`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func index(
+    _ i: Int,
+    offsetBy distance: Int,
+    limitedBy limit: Int
+  ) -> Int? {
+    _elements.index(i, offsetBy: distance, limitedBy: limit)
+  }
+
+  /// Returns the distance between two indices.
+  ///
+  /// - Parameters:
+  ///   - start: A valid index of the collection.
+  ///   - end: Another valid index of the collection. If `end` is equal to
+  ///     `start`, the result is zero.
+  ///
+  /// - Returns: The distance between `start` and `end`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal func distance(from start: Int, to end: Int) -> Int {
+    end - start
+  }
+
+  /// Accesses the element at the specified position.
+  ///
+  /// - Parameter index: The position of the element to access. `index` must be
+  ///   greater than or equal to `startIndex` and less than `endIndex`.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal subscript(position: Int) -> Element {
+    _elements[position]
+  }
+
+  /// Accesses a contiguous subrange of the set's elements.
+  ///
+  /// The returned `SubSequence` instance uses the same indices for the same
+  /// elements as the original set. In particular, that slice, unlike an
+  /// `OrderedSet`, may have a nonzero `startIndex` and an `endIndex` that is
+  /// not equal to `count`. Always use the slice's `startIndex` and `endIndex`
+  /// properties instead of assuming that its indices start or end at a
+  /// particular value.
+  ///
+  /// - Parameter bounds: A range of valid indices in the set.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  internal subscript(bounds: Range<Int>) -> SubSequence {
+    _failEarlyRangeCheck(bounds, bounds: startIndex ..< endIndex)
+    return SubSequence(base: self, bounds: bounds)
+  }
+
+  /// A Boolean value indicating whether the collection is empty.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal var isEmpty: Bool { _elements.isEmpty }
+
+  /// The number of elements in the set.
+  ///
+  /// - Complexity: O(1)
+  @inlinable
+  @inline(__always)
+  internal var count: Int { _elements.count }
+
+  @inlinable
+  internal func _customIndexOfEquatableElement(_ element: Element) -> Int?? {
+    guard let table = _table else {
+      return _elements._customIndexOfEquatableElement(element)
+    }
+    return table.read { hashTable in
+      let (o, _) = hashTable._find(element, in: _elements)
+      guard let offset = o else { return .some(nil) }
+      return offset
+    }
+  }
+
+  @inlinable
+  @inline(__always)
+  internal func _customLastIndexOfEquatableElement(_ element: Element) -> Int?? {
+    // OrderedSet holds unique elements.
+    _customIndexOfEquatableElement(element)
+  }
+
+  @inlinable
+  @inline(__always)
+  internal func _failEarlyRangeCheck(_ index: Int, bounds: Range<Int>) {
+    _elements._failEarlyRangeCheck(index, bounds: bounds)
+  }
+
+  @inlinable
+  @inline(__always)
+  internal func _failEarlyRangeCheck(_ index: Int, bounds: ClosedRange<Int>) {
+    _elements._failEarlyRangeCheck(index, bounds: bounds)
+  }
+
+  @inlinable
+  @inline(__always)
+  internal func _failEarlyRangeCheck(_ range: Range<Int>, bounds: Range<Int>) {
+    _elements._failEarlyRangeCheck(range, bounds: bounds)
+  }
+}
diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+ReserveCapacity.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+ReserveCapacity.swift
new file mode 100644
index 0000000..2cb151f
--- /dev/null
+++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet+ReserveCapacity.swift
@@ -0,0 +1,107 @@
+/* Changes for MultipartKit
+    - removed all functionality not needed by MultipartKit
+    - made all public APIs internal
+
+   DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections.
+*/
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the Swift Collections open source project
+//
+// Copyright (c) 2021 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+//
+//===----------------------------------------------------------------------===//
+
+extension OrderedSet {
+  /// Reserves enough space to store the specified number of elements.
+  ///
+  /// This method ensures that the set has unique mutable storage, with space
+  /// allocated for at least the requested number of elements.
+  ///
+  /// If you are adding a known number of elements to a set, call this method
+  /// once before the first insertion to avoid multiple reallocations.
+  ///
+  /// Do not call this method in a loop -- it does not use an exponential
+  /// allocation strategy, so doing that can result in quadratic instead of
+  /// linear performance.
+  ///
+  /// - Parameter minimumCapacity: The minimum number of elements that the set
+  ///   should be able to store without reallocating its storage.
+  ///
+  /// - Complexity: O(`max(count, minimumCapacity)`)
+  @inlinable
+  internal mutating func reserveCapacity(_ minimumCapacity: Int) {
+    self._reserveCapacity(minimumCapacity, persistent: false)
+  }
+}
+
+extension OrderedSet {
+  /// Reserves enough space to store the specified number of elements.
+  ///
+  /// This method ensures that the set has unique mutable storage, with space
+  /// allocated for at least the requested number of elements.
+  ///
+  /// If you are adding a known number of elements to a set, call this method
+  /// once before the first insertion to avoid multiple reallocations.
+  ///
+  /// Do not call this method in a loop -- it does not use an exponential
+  /// allocation strategy, so doing that can result in quadratic instead of
+  /// linear performance.
+ /// + /// If you have a good idea of the expected working size of the set, calling + /// this method with `persistent` set to true can sometimes improve + /// performance by eliminating churn due to repeated rehashings when the set + /// temporarily shrinks below its regular size. You can cancel any capacity + /// you've previously reserved by persistently reserving a capacity of zero. + /// (This also shrinks the hash table to the ideal size for its current number + /// elements.) + /// + /// - Parameter minimumCapacity: The minimum number of elements that the set + /// should be able to store without reallocating its storage. + /// + /// - Parameter persistent: If set to true, prevent removals from shrinking + /// storage below the specified capacity. By default, removals are allowed + /// to shrink storage below any previously reserved capacity. + /// + /// - Complexity: O(`max(count, minimumCapacity)`) + @inlinable + internal mutating func _reserveCapacity( + _ minimumCapacity: Int, + persistent: Bool + ) { + precondition(minimumCapacity >= 0, "Minimum capacity cannot be negative") + defer { _checkInvariants() } + + _elements.reserveCapacity(minimumCapacity) + + let currentScale = _scale + let newScale = _HashTable.scale(forCapacity: minimumCapacity) + + let reservedScale = persistent ? newScale : _reservedScale + + if currentScale < newScale { + // Grow the table. + _regenerateHashTable(scale: newScale, reservedScale: reservedScale) + return + } + + let requiredScale = _HashTable.scale(forCapacity: self.count) + let minScale = Swift.max(Swift.max(newScale, reservedScale), requiredScale) + if minScale < currentScale { + // Shrink the table. + _regenerateHashTable(scale: minScale, reservedScale: reservedScale) + return + } + + // When we have the right size table, ensure it's unique and it has the + // right persisted reservation. + _ensureUnique() + if _reservedScale != reservedScale { + // Remember reserved scale. + __storage!.header.reservedScale = reservedScale + } + } +} diff --git a/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet.swift b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet.swift new file mode 100644 index 0000000..4738e4a --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/OrderedSet/OrderedSet.swift @@ -0,0 +1,369 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +/// An ordered collection of unique elements. +/// +/// Similar to the standard `Set`, ordered sets ensure that each element appears +/// only once in the collection, and they provide efficient tests for +/// membership. However, like `Array` (and unlike `Set`), ordered sets maintain +/// their elements in a particular user-specified order, and they support +/// efficient random-access traversal of their members. 
+/// +/// `OrderedSet` is a useful alternative to `Set` when the order of elements is +/// important, or when you need to be able to efficiently access elements at +/// various positions within the collection. It can also be used instead of an +/// `Array` when each element needs to be unique, or when you need to be able to +/// quickly determine if a value is a member of the collection. +/// +/// You can create an ordered set with any element type that conforms to the +/// `Hashable` protocol. +/// +/// let buildingMaterials: OrderedSet = ["straw", "sticks", "bricks"] +/// +/// +/// # Equality of Ordered Sets +/// +/// Two ordered sets are considered equal if they contain the same elements, and +/// *in the same order*. This matches the concept of equality of an `Array`, and +/// it is different from the unordered `Set`. +/// +/// let a: OrderedSet = [1, 2, 3, 4] +/// let b: OrderedSet = [4, 3, 2, 1] +/// a == b // false +/// b.sort() // `b` now has value [1, 2, 3, 4] +/// a == b // true +/// +/// # Set Operations +/// +/// `OrderedSet` implements most, but not all, `SetAlgebra` requirements. In +/// particular, it supports the membership test `contains(_:)` as well as all +/// high-level set operations such as `union(_:)`, `intersection(_:)` or +/// `isSubset(of:)`. +/// +/// buildingMaterials.contains("glass") // false +/// buildingMaterials.intersection(["brick", "straw"]) // ["straw", "brick"] +/// +/// Operations that return an ordered set usually preserve the ordering of +/// elements in their input. For example, in the case of the `intersection` call +/// above, the ordering of elements in the result is guaranteed to match their +/// order in the first input set, `buildingMaterials`. +/// +/// On the other hand, predicates such as `isSubset(of:)` tend to ignore element +/// ordering: +/// +/// let moreMaterials: OrderedSet = ["bricks", "glass", "sticks", "straw"] +/// buildingMaterials.isSubset(of: moreMaterials) // true +/// +/// However, `OrderedSet` does not implement `insert(_:)` nor `update(with:)` -- +/// it provides its own variants for insertion that are more explicit about +/// where in the collection new elements gets inserted: +/// +/// func insert(_ item: Element, at index: Int) -> (inserted: Bool, index: Int) +/// func append(_ item: Element) -> (inserted: Bool, index: Int) +/// func update(at index: Int, with item: Element) -> Element +/// func updateOrAppend(_ item: Element) -> Element? +/// +/// Additionally,`OrderedSet` has an order-sensitive definition of equality (see +/// above) that is incompatible with `SetAlgebra`'s documented semantic +/// requirements. Accordingly, `OrderedSet` does not (cannot) itself conform to +/// `SetAlgebra`. +/// +/// # Unordered Set View +/// +/// For cases where `SetAlgebra` conformance is desired (such as when passing an +/// ordered set to a function that is generic over that protocol), `OrderedSet` +/// provides an efficient *unordered view* of its elements that conforms to +/// `SetAlgebra`. The unordered view implements the same concept of equality as +/// the standard `Set`, ignoring element ordering. +/// +/// var a: OrderedSet = [0, 1, 2, 3] +/// let b: OrderedSet = [3, 2, 1, 0] +/// a == b // false +/// a.unordered == b.unordered // true +/// +/// func frobnicate(_ set: S) { ... } +/// frobnicate(a) // error: `OrderedSet` does not conform to `SetAlgebra` +/// frobnicate(a.unordered) // OK +/// +/// The unordered view is mutable. Insertions into it implicitly append new +/// elements to the end of the collection. 
+///
+///     buildingMaterials.unordered.insert("glass") // => inserted: true
+///     // buildingMaterials is now ["straw", "sticks", "bricks", "glass"]
+///
+/// Accessing the unordered view is an efficient operation, with constant
+/// (minimal) overhead. Direct mutations of the unordered view (such as the
+/// insertion above) are executed in place when possible. However, as usual with
+/// copy-on-write collections, if you make a copy of the view (such as by
+/// extracting its value into a named variable), the resulting values will share
+/// the same underlying storage, so mutations of either will incur a copy of the
+/// whole set.
+///
+/// # Sequence and Collection Operations
+///
+/// Ordered sets are random-access collections. Members are assigned integer
+/// indices, with the first element always being at index `0`:
+///
+///     let buildingMaterials: OrderedSet<String> = ["straw", "sticks", "bricks"]
+///     buildingMaterials[1] // "sticks"
+///     buildingMaterials.firstIndex(of: "bricks") // 2
+///
+///     for i in 0 ..< buildingMaterials.count {
+///       print("Little piggie #\(i) built a house of \(buildingMaterials[i])")
+///     }
+///     // Little piggie #0 built a house of straw
+///     // Little piggie #1 built a house of sticks
+///     // Little piggie #2 built a house of bricks
+///
+/// Because `OrderedSet` needs to keep its members unique, it cannot conform to
+/// the full `MutableCollection` or `RangeReplaceableCollection` protocols.
+/// Operations such as `MutableCollection`'s subscript setter or
+/// `RangeReplaceableCollection`'s `replaceSubrange` assume the ability to
+/// insert/replace arbitrary elements in the collection, but allowing that could
+/// lead to duplicate values.
+///
+/// However, `OrderedSet` is able to partially implement these two protocols;
+/// namely, there is no issue with mutation operations that merely change the
+/// order of elements, or just remove some subset of existing members:
+///
+///     // Permutation operations from MutableCollection:
+///     func swapAt(_ i: Int, _ j: Int)
+///     func partition(by predicate: (Element) throws -> Bool) rethrows -> Int
+///     func sort() where Element: Comparable
+///     func sort(by predicate: (Element, Element) throws -> Bool) rethrows
+///     func shuffle()
+///     func shuffle<T: RandomNumberGenerator>(using generator: inout T)
+///     func reverse()
+///
+///     // Removal operations from RangeReplaceableCollection:
+///     func removeAll(keepingCapacity: Bool = false)
+///     func remove(at index: Int) -> Element
+///     func removeSubrange(_ bounds: Range<Int>)
+///     func removeLast() -> Element
+///     func removeLast(_ n: Int)
+///     func removeFirst() -> Element
+///     func removeFirst(_ n: Int)
+///     func removeAll(where shouldBeRemoved: (Element) throws -> Bool) rethrows
+///
+/// `OrderedSet` also implements `reserveCapacity(_:)` from
+/// `RangeReplaceableCollection`, to allow for efficient insertion of a known
+/// number of elements. (However, unlike `Array` and `Set`, `OrderedSet` does
+/// not provide a `capacity` property.)
+///
+/// # Accessing The Contents of an Ordered Set as an Array
+///
+/// In cases where you need to pass the contents of an ordered set to a function
+/// that only takes an array value (or something that's generic over
+/// `RangeReplaceableCollection` or `MutableCollection`), the best option
+/// is usually to directly extract the members of the `OrderedSet` as an `Array`
+/// value using its `elements` property. `OrderedSet` uses a standard array
+/// value for element storage, so extracting the array value has minimal
+/// overhead.
+///
+///     func pickyFunction(_ items: Array<Int>)
+///
+///     var set: OrderedSet<Int> = [0, 1, 2, 3]
+///     pickyFunction(set) // error
+///     pickyFunction(set.elements) // OK
+///
+/// It is also possible to mutate the set by updating the value of the
+/// `elements` property. This guarantees that direct mutations happen in place
+/// when possible (i.e., without spurious copy-on-write copies).
+///
+/// However, the set needs to ensure the uniqueness of its members, so every
+/// update to `elements` includes a postprocessing step to detect and remove
+/// duplicates over the entire array. This can be slower than doing the
+/// equivalent updates with direct `OrderedSet` operations, so updating
+/// `elements` is best used in cases where direct implementations aren't
+/// available -- for example, when you need to call a `MutableCollection`
+/// algorithm that isn't directly implemented by `OrderedSet` itself.
+///
+/// # Performance
+///
+/// Like the standard `Set` type, the performance of hashing operations in
+/// `OrderedSet` is highly sensitive to the quality of hashing implemented by
+/// the `Element` type. Failing to correctly implement hashing can easily lead
+/// to unacceptable performance, with the severity of the effect increasing with
+/// the size of the hash table.
+///
+/// In particular, if a certain set of elements all produce the same hash value,
+/// then hash table lookups regress to searching an element in an unsorted
+/// array, i.e., a linear operation. To ensure hashed collection types exhibit
+/// their target performance, it is important to ensure that such collisions
+/// cannot be induced merely by adding a particular list of members to the set.
+///
+/// The easiest way to achieve this is to make sure `Element` implements hashing
+/// following `Hashable`'s documented best practices. The conformance must
+/// implement the `hash(into:)` requirement, and every bit of information that
+/// is compared in `==` needs to be combined into the supplied `Hasher` value.
+/// When used correctly, `Hasher` produces high-quality, randomly seeded hash
+/// values that prevent repeatable hash collisions.
+///
+/// When `Element` implements `Hashable` correctly, testing for membership in an
+/// ordered set is expected to take O(1) equality checks on average. Hash
+/// collisions can still occur organically, so the worst-case lookup performance
+/// is technically still O(*n*) (where *n* is the size of the set); however,
+/// long lookup chains are unlikely to occur in practice.
+///
+/// # Implementation Details
+///
+/// An `OrderedSet` stores its members in a regular `Array` value (exposed by
+/// the `elements` property). It also maintains a standalone hash table
+/// containing array indices alongside the array; this is used to implement fast
+/// membership tests. The size of the array is limited by the capacity of the
+/// corresponding hash table, so indices stored inside the hash table can be
+/// encoded into fewer bits than a standard `Int` value, leading to a storage
+/// representation that can often be more compact than that of `Set` itself.
+///
+/// Inserting or removing a single member (or a range of members) needs to
+/// perform the corresponding operation in the storage array, in addition to
+/// renumbering any subsequent members in the hash table. Therefore, these
+/// operations are expected to have performance characteristics similar to an
+/// `Array`: inserting or removing an element to the end of an ordered set is
+/// expected to execute in O(1) operations, while they are expected to take
+/// linear time at the front (or in the middle) of the set. (Note that this is
+/// different to the standard `Set`, where insertions and removals are expected
+/// to take amortized O(1) time.)
+internal struct OrderedSet<Element> where Element: Hashable
+{
+  @usableFromInline
+  internal typealias _Bucket = _HashTable.Bucket
+
+  @usableFromInline
+  internal var __storage: _HashTable.Storage?
+
+  @usableFromInline
+  internal var _elements: ContiguousArray<Element>
+
+  @inlinable
+  @inline(__always)
+  internal var _table: _HashTable? {
+    get { __storage.map { _HashTable($0) } }
+    set { __storage = newValue?._storage }
+  }
+}
+
+extension OrderedSet {
+  /// The maximum number of elements this instance can store before it needs
+  /// to resize its hash table.
+  @inlinable
+  internal var _capacity: Int {
+    _table?.capacity ?? _HashTable.maximumUnhashedCount
+  }
+
+  @inlinable
+  internal var _minimumCapacity: Int {
+    if _scale == _reservedScale { return 0 }
+    return _HashTable.minimumCapacity(forScale: _scale)
+  }
+
+  @inlinable
+  internal var _scale: Int {
+    _table?.scale ?? 0
+  }
+
+  @inlinable
+  internal var _reservedScale: Int {
+    _table?.reservedScale ?? 0
+  }
+}
+
+extension OrderedSet {
+  @inlinable
+  internal mutating func _regenerateHashTable(scale: Int, reservedScale: Int) {
+    assert(_HashTable.maximumCapacity(forScale: scale) >= _elements.count)
+    assert(reservedScale == 0 || reservedScale >= _HashTable.minimumScale)
+    _table = _HashTable.create(
+      uncheckedUniqueElements: _elements,
+      scale: Swift.max(scale, reservedScale),
+      reservedScale: reservedScale)
+  }
+
+  @inlinable
+  internal mutating func _regenerateHashTable() {
+    let reservedScale = _reservedScale
+    guard
+      _elements.count > _HashTable.maximumUnhashedCount || reservedScale != 0
+    else {
+      // We have too few elements; disable hashing.
+      _table = nil
+      return
+    }
+    let scale = _HashTable.scale(forCapacity: _elements.count)
+    _regenerateHashTable(scale: scale, reservedScale: reservedScale)
+  }
+}
+
+extension OrderedSet {
+  @inlinable
+  internal mutating func _ensureUnique() {
+    if __storage == nil { return }
+    if isKnownUniquelyReferenced(&__storage) { return }
+    _table = _table!.copy()
+  }
+}
+
+extension OrderedSet {
+  @inlinable
+  internal func _find(_ item: Element) -> (index: Int?, bucket: _Bucket) {
+    _find_inlined(item)
+  }
+
+  @inlinable
+  @inline(__always)
+  internal func _find_inlined(_ item: Element) -> (index: Int?, bucket: _Bucket) {
+    _elements.withUnsafeBufferPointer { elements in
+      guard let table = _table else {
+        return (elements.firstIndex(of: item), _Bucket(offset: 0))
+      }
+      return table.read { hashTable in
+        hashTable._find(item, in: elements)
+      }
+    }
+  }
+}
+
+extension OrderedSet {
+  @inlinable
+  @discardableResult
+  internal mutating func _removeExistingMember(
+    at index: Int,
+    in bucket: _Bucket
+  ) -> Element {
+    guard _elements.count - 1 >= _minimumCapacity else {
+      let old = _elements.remove(at: index)
+      _regenerateHashTable()
+      return old
+    }
+    guard _table != nil else {
+      return _elements.remove(at: index)
+    }
+
+    defer { _checkInvariants() }
+    _ensureUnique()
+    _table!.update { hashTable in
+      // Delete the entry for the removed member.
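+      // (The hash-value generator passed below recomputes an element's hash
+      // from its array offset and the table's seed; the hash table presumably
+      // uses it to work out where the remaining entries belong while it
+      // repairs the probe sequence after the deletion.)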
+ hashTable.delete( + bucket: bucket, + hashValueGenerator: { offset, seed in + _elements[offset]._rawHashValue(seed: seed) + }) + hashTable.adjustContents(preparingForRemovalOf: index, in: _elements) + } + return _elements.remove(at: index) + } +} diff --git a/Sources/MultipartKit/OrderedCollections/Utilities/RandomAccessCollection+Offsets.swift b/Sources/MultipartKit/OrderedCollections/Utilities/RandomAccessCollection+Offsets.swift new file mode 100644 index 0000000..f62fcd4 --- /dev/null +++ b/Sources/MultipartKit/OrderedCollections/Utilities/RandomAccessCollection+Offsets.swift @@ -0,0 +1,36 @@ +/* Changes for MultipartKit + - removed all functionality not needed by MultipartKit + - made all public APIs internal + + DO NOT CHANGE THESE FILES, THEY ARE VENDORED FROM Swift Collections. +*/ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Collections open source project +// +// Copyright (c) 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// +//===----------------------------------------------------------------------===// + +extension RandomAccessCollection { + @inlinable + @inline(__always) + internal func _index(at offset: Int) -> Index { + index(startIndex, offsetBy: offset) + } + + @inlinable + @inline(__always) + internal func _offset(of index: Index) -> Int { + distance(from: startIndex, to: index) + } + + @inlinable + @inline(__always) + internal subscript(_offset offset: Int) -> Element { + self[_index(at: offset)] + } +} diff --git a/Sources/MultipartKit/Utilities.swift b/Sources/MultipartKit/Utilities.swift index 35db416..57c3ffb 100644 --- a/Sources/MultipartKit/Utilities.swift +++ b/Sources/MultipartKit/Utilities.swift @@ -1,17 +1,5 @@ import Foundation -extension Array where Element == UInt8 { - mutating func write(string: String) { - if string.utf8.withContiguousStorageIfAvailable({ storage in - self.append(contentsOf: storage) - }) == nil { - (string + "").utf8.withContiguousStorageIfAvailable({ storage in - self.append(contentsOf: storage) - })! - } - } -} - extension HTTPHeaders { func getParameter(_ name: String, _ key: String) -> String? { return self.headerParts(name: name).flatMap { @@ -58,10 +46,3 @@ extension CharacterSet { return .init(charactersIn: #""'"#) } } - -extension Collection { - /// Returns the element at the specified index if it is within bounds, otherwise nil. - subscript (safe index: Index) -> Element? { - self.indices.contains(index) ? 
self[index] : nil - } -} diff --git a/Tests/MultipartKitTests/MultipartKitTests.swift b/Tests/MultipartKitTests/MultipartKitTests.swift index 957fe27..b9fcd9c 100644 --- a/Tests/MultipartKitTests/MultipartKitTests.swift +++ b/Tests/MultipartKitTests/MultipartKitTests.swift @@ -291,7 +291,7 @@ class MultipartTests: XCTestCase { XCTAssertEqual(data, "--123\r\n\r\nfoo\r\n--123--\r\n") } } - + func testFormDataDecoderMultipleWithMissingData() { /// Content-Type: multipart/form-data; boundary=hello let data = """ @@ -307,12 +307,11 @@ class MultipartTests: XCTestCase { } XCTAssertThrowsError(try FormDataDecoder().decode(Foo.self, from: data, boundary: "hello")) { error in - guard case let MultipartError.missingPart(array) = error else { - XCTFail("Was expecting an error of type MultipartError.missingPart") + guard case let DecodingError.dataCorrupted(context) = error else { + XCTFail("Was expecting an error of type DecodingError.dataCorrupted") return } - - XCTAssertEqual(array, "relative") + XCTAssertEqual(context.codingPath.map(\.stringValue), ["link"]) } } @@ -394,6 +393,158 @@ class MultipartTests: XCTestCase { } } } + + func testNestedEncode() throws { + struct Foo: Encodable { + struct Bar: Encodable { + let baz: Int + } + let bar: Bar + let bars: [Bar] + } + + let encoder = FormDataEncoder() + let data = try encoder.encode(Foo(bar: .init(baz: 1), bars: [.init(baz: 2), .init(baz: 3)]), boundary: "-") + let expected = """ + ---\r + Content-Disposition: form-data; name="bar[baz]"\r + \r + 1\r + ---\r + Content-Disposition: form-data; name="bars[][baz]"\r + \r + 2\r + ---\r + Content-Disposition: form-data; name="bars[][baz]"\r + \r + 3\r + -----\r\n + """ + + XCTAssertEqual(data, expected) + } + + func testNestedDecode() throws { + struct Foo: Decodable, Equatable { + struct Bar: Decodable, Equatable { + let baz: Int + } + let bar: Bar + let bars: [Bar] + } + + let data = """ + ---\r + Content-Disposition: form-data; name="bar[baz]"\r + \r + 1\r + ---\r + Content-Disposition: form-data; name="bars[][baz]"\r + \r + 2\r + ---\r + Content-Disposition: form-data; name="bars[][baz]"\r + \r + 3\r + -----\r\n + """ + + let decoder = FormDataDecoder() + let foo = try decoder.decode(Foo.self, from: data, boundary: "-") + + XCTAssertEqual(foo, Foo(bar: .init(baz: 1), bars: [.init(baz: 2), .init(baz: 3)])) + } + + func testDecodingSingleValue() throws { + let data = """ + ---\r + \r + 1\r + -----\r\n + """ + + let decoder = FormDataDecoder() + let foo = try decoder.decode(Int.self, from: data, boundary: "-") + XCTAssertEqual(foo, 1) + } + + func testMultiPartConvertibleTakesPrecedenceOverDecodable() throws { + struct Foo: Decodable, MultipartPartConvertible { + var multipart: MultipartPart? 
{ nil } + + let success: Bool + + init(from _: Decoder) throws { + success = false + } + init?(multipart: MultipartPart) { + success = true + } + } + + let singleValue = """ + ---\r + \r + \r + -----\r\n + """ + let decoder = FormDataDecoder() + let singleFoo = try decoder.decode(Foo.self, from: singleValue, boundary: "-") + XCTAssertTrue(singleFoo.success) + + let array = """ + ---\r + Content-Disposition: form-data; name=""\r + \r + \r + -----\r\n + """ + + let fooArray = try decoder.decode([Foo].self, from: array, boundary: "-") + XCTAssertFalse(fooArray.isEmpty) + XCTAssertTrue(fooArray.allSatisfy(\.success)) + + let keyed = """ + ---\r + Content-Disposition: form-data; name="a"\r + \r + \r + -----\r\n + """ + + let keyedFoos = try decoder.decode([String: Foo].self, from: keyed, boundary: "-") + XCTAssertFalse(keyedFoos.isEmpty) + XCTAssertTrue(keyedFoos.values.allSatisfy(\.success)) + } + + func testNestingDepth() throws { + let nested = """ + ---\r + Content-Disposition: form-data; name=a[]\r + \r + 1\r + -----\r\n + """ + + XCTAssertNoThrow(try FormDataDecoder(nestingDepth: 3).decode([String: [Int]].self, from: nested, boundary: "-")) + XCTAssertThrowsError(try FormDataDecoder(nestingDepth: 2).decode([String: [Int]].self, from: nested, boundary: "-")) + } + + func testFailingToInitializeMultipartConvertableDoesNotCrash() throws { + struct Foo: MultipartPartConvertible, Decodable { + init?(multipart: MultipartPart) { nil } + var multipart: MultipartPart? { nil } + } + + let input = """ + ---\r + \r + \r + null\r + -----\r\n + """ + XCTAssertThrowsError(try FormDataDecoder().decode(Foo.self, from: input, boundary: "-")) + } } // https://stackoverflow.com/a/54524110/1041105