From 1ede0f1b66ef6e535eba3f1158ae09d5e0e2ed90 Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:34:46 +0200 Subject: [PATCH 1/7] test we can load EDFs into MNE --- Project.toml | 8 ++++- test/runtests.jl | 91 +++++++++++++++++++++++++++++++++--------------- 2 files changed, 70 insertions(+), 29 deletions(-) diff --git a/Project.toml b/Project.toml index 9deffef..b4347e3 100644 --- a/Project.toml +++ b/Project.toml @@ -9,13 +9,19 @@ Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] +Accessors = "0.1" BitIntegers = "0.2" FilePathsBase = "0.9.13" +IOCapture = "0.2.3" +PyMNE = "0.2.2" julia = "1.4" [extras] +Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" FilePathsBase = "48062228-2e41-5def-b9a4-89aafe57970f" +IOCapture = "b5f81e59-6552-4d32-b1f0-c071b021bf89" +PyMNE = "6c5003b2-cbe8-491c-a0d1-70088e6a0fd6" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["FilePathsBase", "Test"] +test = ["Accessors", "FilePathsBase", "IOCapture", "PyMNE", "Test"] diff --git a/test/runtests.jl b/test/runtests.jl index 3a6e18a..7052303 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,15 +1,17 @@ using EDF using EDF: TimestampedAnnotationList, PatientID, RecordingID, SignalHeader, - Signal, AnnotationsSignal + Signal, AnnotationsSignal using Dates using FilePathsBase using Test +using PyMNE +using Accessors ##### ##### Testing utilities ##### -function deep_equal(a::T, b::T) where T +function deep_equal(a::T, b::T) where {T} nfields = fieldcount(T) if nfields == 0 return isequal(a, b) # Use `isequal` instead of `==` to handle `missing` @@ -23,7 +25,7 @@ function deep_equal(a::T, b::T) where T return true end -function deep_equal(a::T, b::T) where T<:AbstractArray +function deep_equal(a::T, b::T) where {T<:AbstractArray} length(a) == length(b) || return false for (x, y) in zip(a, b) deep_equal(x, y) || return false @@ -33,6 +35,21 @@ end deep_equal(a::T, b::S) where {T,S} = false +function mne_read(edf) + tmpfile = joinpath(mktempdir(), "test.edf") + EDF.write(tmpfile, edf) + # Check we can load it and do something with it + py = PyMNE.io.read_raw_edf(tmpfile; verbose=false) + py.load_data(; verbose=false) + collect(py.annotations) + results = IOCapture.capture() do + py.describe() + end + @test !results.error + @test contains(results.output, "RawEDF | test.edf") + return py +end + ##### ##### Actual tests ##### @@ -62,11 +79,11 @@ const DATADIR = joinpath(@__DIR__, "data") # the entire file, but it seems like whoever wrote these onsets might have used values # that were relative to the start of the surrounding data record expected = [[TimestampedAnnotationList(0.0, nothing, String[""]), TimestampedAnnotationList(0.0, nothing, ["start"])], - [TimestampedAnnotationList(1.0, nothing, String[""]), TimestampedAnnotationList(0.1344, 0.256, ["type A"])], - [TimestampedAnnotationList(2.0, nothing, String[""]), TimestampedAnnotationList(0.3904, 1.0, ["type A"])], - [TimestampedAnnotationList(3.0, nothing, String[""]), TimestampedAnnotationList(2.0, nothing, ["type B"])], - [TimestampedAnnotationList(4.0, nothing, String[""]), TimestampedAnnotationList(2.5, 2.5, ["type A"])], - [TimestampedAnnotationList(5.0, nothing, String[""])]] + [TimestampedAnnotationList(1.0, nothing, String[""]), TimestampedAnnotationList(0.1344, 0.256, ["type A"])], + [TimestampedAnnotationList(2.0, nothing, String[""]), TimestampedAnnotationList(0.3904, 1.0, ["type A"])], + 
[TimestampedAnnotationList(3.0, nothing, String[""]), TimestampedAnnotationList(2.0, nothing, ["type B"])], + [TimestampedAnnotationList(4.0, nothing, String[""]), TimestampedAnnotationList(2.5, 2.5, ["type A"])], + [TimestampedAnnotationList(5.0, nothing, String[""])]] @test all(signal.records .== expected) @test AnnotationsSignal(signal.records).samples_per_record == 16 end @@ -87,17 +104,25 @@ const DATADIR = joinpath(@__DIR__, "data") EDF.read!(file) @test deep_equal(edf.signals, file.signals) + # Check we can read it into MNE + py = mne_read(edf) + @test length(py.annotations) == 5 + ann = py.annotations[1] + @test pyconvert(Float64, ann["onset"]) == 0.1344 + @test pyconvert(Float64, ann["duration"]) == 0.256 + @test pyconvert(String, ann["description"]) == "type A" + # test that EDF.write(::IO, ::EDF.File) errors if file is # discontiguous w/o an AnnotationsSignal present bad_file = EDF.File(IOBuffer(), - EDF.FileHeader(file.header.version, - file.header.patient, - file.header.recording, - file.header.start, - false, # is_contiguous - file.header.record_count, - file.header.seconds_per_record), - filter(s -> !(s isa AnnotationsSignal), file.signals)) + EDF.FileHeader(file.header.version, + file.header.patient, + file.header.recording, + file.header.start, + false, # is_contiguous + file.header.record_count, + file.header.seconds_per_record), + filter(s -> !(s isa AnnotationsSignal), file.signals)) @test_throws ArgumentError EDF.write(IOBuffer(), bad_file) # test EDF.write(::AbstractString, ::EDF.File) @@ -156,9 +181,9 @@ const DATADIR = joinpath(@__DIR__, "data") # for x in signal: # f.write("%s\n" % x) # ``` - mne = map(line->parse(Float32, line), eachline(joinpath(DATADIR, "mne_values.csv"))) + mne = map(line -> parse(Float32, line), eachline(joinpath(DATADIR, "mne_values.csv"))) for (a, b) in zip(EDF.decode(signal), mne) - @test a ≈ b atol=0.01 + @test a ≈ b atol = 0.01 end # Truncated files @@ -167,10 +192,10 @@ const DATADIR = joinpath(@__DIR__, "data") # note that this tests a truncated final record, not an incorrect number of records truncated_file = joinpath(dir, "test_truncated" * last(splitext(full_file))) full_edf_bytes = read(joinpath(DATADIR, full_file)) - write(truncated_file, full_edf_bytes[1:(end - 1)]) + write(truncated_file, full_edf_bytes[1:(end-1)]) @test_logs((:warn, "Number of data records in file header does not match " * - "file size. Skipping 1 truncated data record(s)."), - EDF.read(truncated_file)) + "file size. 
Skipping 1 truncated data record(s)."), + EDF.read(truncated_file)) edf = EDF.read(joinpath(DATADIR, full_file)) truncated_edf = EDF.read(truncated_file) for field in fieldnames(EDF.FileHeader) @@ -187,13 +212,13 @@ const DATADIR = joinpath(@__DIR__, "data") bad = truncated_edf.signals[i] if good isa EDF.Signal @test deep_equal(good.header, bad.header) - @test good.samples[1:(end - good.header.samples_per_record)] == bad.samples + @test good.samples[1:(end-good.header.samples_per_record)] == bad.samples else @test good.samples_per_record == bad.samples_per_record end end - @test deep_equal(edf.signals[end].records[1:(edf.header.record_count - 1)], - truncated_edf.signals[end].records) + @test deep_equal(edf.signals[end].records[1:(edf.header.record_count-1)], + truncated_edf.signals[end].records) # Ensure that "exotic" IO types work for truncated records if the requisite # methods exist fb = FileBuffer(Path(truncated_file)) @@ -215,7 +240,7 @@ const DATADIR = joinpath(@__DIR__, "data") for i in 1:8 bdf_values = EDF.decode(bdf.signals[i]) comp_values = EDF.decode(comp.signals[i]) - @test bdf_values ≈ comp_values rtol=0.01 + @test bdf_values ≈ comp_values rtol = 0.01 end # Ensure that BDF files can also be round-tripped mktempdir() do dir @@ -241,15 +266,25 @@ const DATADIR = joinpath(@__DIR__, "data") edf = EDF.File(io) @test sprint(show, edf) == "EDF.File with 140 16-bit-encoded signals" end + + @testset "Exports readable by MNE" begin + edf = EDF.read(joinpath(DATADIR, "test_float_extrema.edf")) + @test edf.signals[1].header.digital_minimum ≈ -32767.0f0 + edf = @set edf.signals[1].header.digital_minimum = -32767*2 + + py = mne_read(edf) + @test isempty(py.annotations) + + end end @testset "BDF+ Files" begin # This is a `BDF+` file containing only trigger information. - # It is similiar to a `EDF Annotations` file except that + # It is similiar to a `EDF Annotations` file except that # The `ANNOTATIONS_SIGNAL_LABEL` is `BDF Annotations`. - # The test data has 1081 trigger events, and - # has 180 trials in total, and + # The test data has 1081 trigger events, and + # has 180 trials in total, and # The annotation `255` signifies the offset of a trial. 
# More information, contact: zhanlikan@hotmail.com evt = EDF.read(joinpath(DATADIR, "evt.bdf")) From 85bcd38ae056bc8d9cf2a701bdbb85fcffb47846 Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:36:53 +0200 Subject: [PATCH 2/7] format --- .JuliaFormatter.toml | 1 + test/runtests.jl | 76 +++++++++++++++++++++++++------------------- 2 files changed, 44 insertions(+), 33 deletions(-) create mode 100644 .JuliaFormatter.toml diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml new file mode 100644 index 0000000..857c3ae --- /dev/null +++ b/.JuliaFormatter.toml @@ -0,0 +1 @@ +style = "yas" diff --git a/test/runtests.jl b/test/runtests.jl index 7052303..21ce990 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,6 +1,6 @@ using EDF using EDF: TimestampedAnnotationList, PatientID, RecordingID, SignalHeader, - Signal, AnnotationsSignal + Signal, AnnotationsSignal using Dates using FilePathsBase using Test @@ -16,7 +16,7 @@ function deep_equal(a::T, b::T) where {T} if nfields == 0 return isequal(a, b) # Use `isequal` instead of `==` to handle `missing` else - for i = 1:nfields + for i in 1:nfields typeof(getfield(a, i)) <: IO && continue # Two different files will have different IO sources isdefined(a, i) || return !isdefined(b, i) # Call two undefs equal deep_equal(getfield(a, i), getfield(b, i)) || return false @@ -43,7 +43,7 @@ function mne_read(edf) py.load_data(; verbose=false) collect(py.annotations) results = IOCapture.capture() do - py.describe() + return py.describe() end @test !results.error @test contains(results.output, "RawEDF | test.edf") @@ -71,19 +71,25 @@ const DATADIR = joinpath(@__DIR__, "data") @test length(edf.signals) == 140 for signal in edf.signals if signal isa EDF.Signal - @test length(signal.samples) == signal.header.samples_per_record * edf.header.record_count + @test length(signal.samples) == + signal.header.samples_per_record * edf.header.record_count else @test length(signal.records) == edf.header.record_count # XXX seems like this test file actually contains nonsensical onset timestamps... 
# according to the EDF+ specification, onsets should be relative to the start time of # the entire file, but it seems like whoever wrote these onsets might have used values # that were relative to the start of the surrounding data record - expected = [[TimestampedAnnotationList(0.0, nothing, String[""]), TimestampedAnnotationList(0.0, nothing, ["start"])], - [TimestampedAnnotationList(1.0, nothing, String[""]), TimestampedAnnotationList(0.1344, 0.256, ["type A"])], - [TimestampedAnnotationList(2.0, nothing, String[""]), TimestampedAnnotationList(0.3904, 1.0, ["type A"])], - [TimestampedAnnotationList(3.0, nothing, String[""]), TimestampedAnnotationList(2.0, nothing, ["type B"])], - [TimestampedAnnotationList(4.0, nothing, String[""]), TimestampedAnnotationList(2.5, 2.5, ["type A"])], - [TimestampedAnnotationList(5.0, nothing, String[""])]] + expected = [[TimestampedAnnotationList(0.0, nothing, String[""]), + TimestampedAnnotationList(0.0, nothing, ["start"])], + [TimestampedAnnotationList(1.0, nothing, String[""]), + TimestampedAnnotationList(0.1344, 0.256, ["type A"])], + [TimestampedAnnotationList(2.0, nothing, String[""]), + TimestampedAnnotationList(0.3904, 1.0, ["type A"])], + [TimestampedAnnotationList(3.0, nothing, String[""]), + TimestampedAnnotationList(2.0, nothing, ["type B"])], + [TimestampedAnnotationList(4.0, nothing, String[""]), + TimestampedAnnotationList(2.5, 2.5, ["type A"])], + [TimestampedAnnotationList(5.0, nothing, String[""])]] @test all(signal.records .== expected) @test AnnotationsSignal(signal.records).samples_per_record == 16 end @@ -115,14 +121,14 @@ const DATADIR = joinpath(@__DIR__, "data") # test that EDF.write(::IO, ::EDF.File) errors if file is # discontiguous w/o an AnnotationsSignal present bad_file = EDF.File(IOBuffer(), - EDF.FileHeader(file.header.version, - file.header.patient, - file.header.recording, - file.header.start, - false, # is_contiguous - file.header.record_count, - file.header.seconds_per_record), - filter(s -> !(s isa AnnotationsSignal), file.signals)) + EDF.FileHeader(file.header.version, + file.header.patient, + file.header.recording, + file.header.start, + false, # is_contiguous + file.header.record_count, + file.header.seconds_per_record), + filter(s -> !(s isa AnnotationsSignal), file.signals)) @test_throws ArgumentError EDF.write(IOBuffer(), bad_file) # test EDF.write(::AbstractString, ::EDF.File) @@ -137,11 +143,14 @@ const DATADIR = joinpath(@__DIR__, "data") @test eof(io) end - @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-0.0023405432)) == "-0.00234" - @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(0.0023405432)) == "0.002340" + @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-0.0023405432)) == + "-0.00234" + @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(0.0023405432)) == + "0.002340" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(1.002343)) == "1.002343" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(1011.05432)) == "1011.054" - @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1011.05432)) == "-1011.05" + @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1011.05432)) == + "-1011.05" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1013441.5)) == "-1013442" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1013441.3)) == "-1013441" @test EDF._edf_repr(34577777) == "34577777" @@ -156,13 +165,15 @@ const DATADIR = joinpath(@__DIR__, "data") uneven = EDF.read(joinpath(DATADIR, 
"test_uneven_samp.edf")) @test sprint(show, uneven) == "EDF.File with 2 16-bit-encoded signals" @test uneven.header.version == "0" - @test uneven.header.patient == "A 3Hz sinewave and a 0.2Hz block signal, both starting in their positive phase" + @test uneven.header.patient == + "A 3Hz sinewave and a 0.2Hz block signal, both starting in their positive phase" @test uneven.header.recording == "110 seconds from 13-JUL-2000 12.05.48hr." @test uneven.header.is_contiguous @test uneven.header.start == DateTime(2000, 1, 31, 23, 0, 59) @test uneven.header.record_count == 11 @test uneven.header.seconds_per_record == 10.0 - @test uneven.signals[1].header.samples_per_record != uneven.signals[2].header.samples_per_record + @test uneven.signals[1].header.samples_per_record != + uneven.signals[2].header.samples_per_record @test length(uneven.signals) == 2 nonint = EDF.read(joinpath(DATADIR, "test_float_extrema.edf")) @@ -192,10 +203,11 @@ const DATADIR = joinpath(@__DIR__, "data") # note that this tests a truncated final record, not an incorrect number of records truncated_file = joinpath(dir, "test_truncated" * last(splitext(full_file))) full_edf_bytes = read(joinpath(DATADIR, full_file)) - write(truncated_file, full_edf_bytes[1:(end-1)]) - @test_logs((:warn, "Number of data records in file header does not match " * - "file size. Skipping 1 truncated data record(s)."), - EDF.read(truncated_file)) + write(truncated_file, full_edf_bytes[1:(end - 1)]) + @test_logs((:warn, + "Number of data records in file header does not match " * + "file size. Skipping 1 truncated data record(s)."), + EDF.read(truncated_file)) edf = EDF.read(joinpath(DATADIR, full_file)) truncated_edf = EDF.read(truncated_file) for field in fieldnames(EDF.FileHeader) @@ -212,13 +224,13 @@ const DATADIR = joinpath(@__DIR__, "data") bad = truncated_edf.signals[i] if good isa EDF.Signal @test deep_equal(good.header, bad.header) - @test good.samples[1:(end-good.header.samples_per_record)] == bad.samples + @test good.samples[1:(end - good.header.samples_per_record)] == bad.samples else @test good.samples_per_record == bad.samples_per_record end end - @test deep_equal(edf.signals[end].records[1:(edf.header.record_count-1)], - truncated_edf.signals[end].records) + @test deep_equal(edf.signals[end].records[1:(edf.header.record_count - 1)], + truncated_edf.signals[end].records) # Ensure that "exotic" IO types work for truncated records if the requisite # methods exist fb = FileBuffer(Path(truncated_file)) @@ -270,15 +282,13 @@ const DATADIR = joinpath(@__DIR__, "data") @testset "Exports readable by MNE" begin edf = EDF.read(joinpath(DATADIR, "test_float_extrema.edf")) @test edf.signals[1].header.digital_minimum ≈ -32767.0f0 - edf = @set edf.signals[1].header.digital_minimum = -32767*2 + edf = @set edf.signals[1].header.digital_minimum = -32767 * 2 py = mne_read(edf) @test isempty(py.annotations) - end end - @testset "BDF+ Files" begin # This is a `BDF+` file containing only trigger information. 
# It is similiar to a `EDF Annotations` file except that From 1c43beea52924524cbd312fcc5baef21760777ec Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:41:14 +0200 Subject: [PATCH 3/7] undo formatting changes --- test/runtests.jl | 50 ++++++++++++++++++------------------------------ 1 file changed, 19 insertions(+), 31 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 21ce990..98d2755 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -11,12 +11,12 @@ using Accessors ##### Testing utilities ##### -function deep_equal(a::T, b::T) where {T} +function deep_equal(a::T, b::T) where T nfields = fieldcount(T) if nfields == 0 return isequal(a, b) # Use `isequal` instead of `==` to handle `missing` else - for i in 1:nfields + for i = 1:nfields typeof(getfield(a, i)) <: IO && continue # Two different files will have different IO sources isdefined(a, i) || return !isdefined(b, i) # Call two undefs equal deep_equal(getfield(a, i), getfield(b, i)) || return false @@ -25,7 +25,7 @@ function deep_equal(a::T, b::T) where {T} return true end -function deep_equal(a::T, b::T) where {T<:AbstractArray} +function deep_equal(a::T, b::T) where T<:AbstractArray length(a) == length(b) || return false for (x, y) in zip(a, b) deep_equal(x, y) || return false @@ -71,24 +71,18 @@ const DATADIR = joinpath(@__DIR__, "data") @test length(edf.signals) == 140 for signal in edf.signals if signal isa EDF.Signal - @test length(signal.samples) == - signal.header.samples_per_record * edf.header.record_count + @test length(signal.samples) == signal.header.samples_per_record * edf.header.record_count else @test length(signal.records) == edf.header.record_count # XXX seems like this test file actually contains nonsensical onset timestamps... 
# according to the EDF+ specification, onsets should be relative to the start time of # the entire file, but it seems like whoever wrote these onsets might have used values # that were relative to the start of the surrounding data record - expected = [[TimestampedAnnotationList(0.0, nothing, String[""]), - TimestampedAnnotationList(0.0, nothing, ["start"])], - [TimestampedAnnotationList(1.0, nothing, String[""]), - TimestampedAnnotationList(0.1344, 0.256, ["type A"])], - [TimestampedAnnotationList(2.0, nothing, String[""]), - TimestampedAnnotationList(0.3904, 1.0, ["type A"])], - [TimestampedAnnotationList(3.0, nothing, String[""]), - TimestampedAnnotationList(2.0, nothing, ["type B"])], - [TimestampedAnnotationList(4.0, nothing, String[""]), - TimestampedAnnotationList(2.5, 2.5, ["type A"])], + expected = [[TimestampedAnnotationList(0.0, nothing, String[""]), TimestampedAnnotationList(0.0, nothing, ["start"])], + [TimestampedAnnotationList(1.0, nothing, String[""]), TimestampedAnnotationList(0.1344, 0.256, ["type A"])], + [TimestampedAnnotationList(2.0, nothing, String[""]), TimestampedAnnotationList(0.3904, 1.0, ["type A"])], + [TimestampedAnnotationList(3.0, nothing, String[""]), TimestampedAnnotationList(2.0, nothing, ["type B"])], + [TimestampedAnnotationList(4.0, nothing, String[""]), TimestampedAnnotationList(2.5, 2.5, ["type A"])], [TimestampedAnnotationList(5.0, nothing, String[""])]] @test all(signal.records .== expected) @test AnnotationsSignal(signal.records).samples_per_record == 16 @@ -143,14 +137,11 @@ const DATADIR = joinpath(@__DIR__, "data") @test eof(io) end - @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-0.0023405432)) == - "-0.00234" - @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(0.0023405432)) == - "0.002340" + @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-0.0023405432)) == "-0.00234" + @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(0.0023405432)) == "0.002340" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(1.002343)) == "1.002343" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(1011.05432)) == "1011.054" - @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1011.05432)) == - "-1011.05" + @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1011.05432)) == "-1011.05" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1013441.5)) == "-1013442" @test EDF._edf_repr(EDF._nearest_representable_edf_time_value(-1013441.3)) == "-1013441" @test EDF._edf_repr(34577777) == "34577777" @@ -165,15 +156,13 @@ const DATADIR = joinpath(@__DIR__, "data") uneven = EDF.read(joinpath(DATADIR, "test_uneven_samp.edf")) @test sprint(show, uneven) == "EDF.File with 2 16-bit-encoded signals" @test uneven.header.version == "0" - @test uneven.header.patient == - "A 3Hz sinewave and a 0.2Hz block signal, both starting in their positive phase" + @test uneven.header.patient == "A 3Hz sinewave and a 0.2Hz block signal, both starting in their positive phase" @test uneven.header.recording == "110 seconds from 13-JUL-2000 12.05.48hr." 
@test uneven.header.is_contiguous @test uneven.header.start == DateTime(2000, 1, 31, 23, 0, 59) @test uneven.header.record_count == 11 @test uneven.header.seconds_per_record == 10.0 - @test uneven.signals[1].header.samples_per_record != - uneven.signals[2].header.samples_per_record + @test uneven.signals[1].header.samples_per_record != uneven.signals[2].header.samples_per_record @test length(uneven.signals) == 2 nonint = EDF.read(joinpath(DATADIR, "test_float_extrema.edf")) @@ -192,9 +181,9 @@ const DATADIR = joinpath(@__DIR__, "data") # for x in signal: # f.write("%s\n" % x) # ``` - mne = map(line -> parse(Float32, line), eachline(joinpath(DATADIR, "mne_values.csv"))) + mne = map(line->parse(Float32, line), eachline(joinpath(DATADIR, "mne_values.csv"))) for (a, b) in zip(EDF.decode(signal), mne) - @test a ≈ b atol = 0.01 + @test a ≈ b atol=0.01 end # Truncated files @@ -204,10 +193,9 @@ const DATADIR = joinpath(@__DIR__, "data") truncated_file = joinpath(dir, "test_truncated" * last(splitext(full_file))) full_edf_bytes = read(joinpath(DATADIR, full_file)) write(truncated_file, full_edf_bytes[1:(end - 1)]) - @test_logs((:warn, - "Number of data records in file header does not match " * + @test_logs((:warn, "Number of data records in file header does not match " * "file size. Skipping 1 truncated data record(s)."), - EDF.read(truncated_file)) + EDF.read(truncated_file)) edf = EDF.read(joinpath(DATADIR, full_file)) truncated_edf = EDF.read(truncated_file) for field in fieldnames(EDF.FileHeader) @@ -252,7 +240,7 @@ const DATADIR = joinpath(@__DIR__, "data") for i in 1:8 bdf_values = EDF.decode(bdf.signals[i]) comp_values = EDF.decode(comp.signals[i]) - @test bdf_values ≈ comp_values rtol = 0.01 + @test bdf_values ≈ comp_values rtol=0.01 end # Ensure that BDF files can also be round-tripped mktempdir() do dir From 22851be73e9c02c2c362ac0c6d1ddf05df60c8cc Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:44:02 +0200 Subject: [PATCH 4/7] restore trailing whitespace --- test/runtests.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 98d2755..903c4f8 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -279,10 +279,10 @@ end @testset "BDF+ Files" begin # This is a `BDF+` file containing only trigger information. - # It is similiar to a `EDF Annotations` file except that + # It is similiar to a `EDF Annotations` file except that # The `ANNOTATIONS_SIGNAL_LABEL` is `BDF Annotations`. - # The test data has 1081 trigger events, and - # has 180 trials in total, and + # The test data has 1081 trigger events, and + # has 180 trials in total, and # The annotation `255` signifies the offset of a trial. 
# More information, contact: zhanlikan@hotmail.com evt = EDF.read(joinpath(DATADIR, "evt.bdf")) From 5654070102ddb040cf894ca6aab61509ba9896f9 Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Mon, 16 Oct 2023 14:09:40 +0200 Subject: [PATCH 5/7] fix tests --- .github/workflows/ci.yml | 2 +- Project.toml | 2 +- test/runtests.jl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8a8b63c..aa1103d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: version: - - '1.4' # Earliest supported version + - '1.6' # Earliest supported version - '1' # Latest release - 'nightly' os: diff --git a/Project.toml b/Project.toml index b4347e3..cef1f47 100644 --- a/Project.toml +++ b/Project.toml @@ -14,7 +14,7 @@ BitIntegers = "0.2" FilePathsBase = "0.9.13" IOCapture = "0.2.3" PyMNE = "0.2.2" -julia = "1.4" +julia = "1.6" [extras] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/test/runtests.jl b/test/runtests.jl index 903c4f8..7c4e6d5 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,7 +6,7 @@ using FilePathsBase using Test using PyMNE using Accessors - +using IOCapture ##### ##### Testing utilities ##### From ae560b3c9cd30165849393197912334a836a3c8f Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Mon, 16 Oct 2023 14:32:55 +0200 Subject: [PATCH 6/7] simpler test --- Project.toml | 4 +--- test/runtests.jl | 10 +++------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/Project.toml b/Project.toml index cef1f47..4a13b02 100644 --- a/Project.toml +++ b/Project.toml @@ -12,16 +12,14 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Accessors = "0.1" BitIntegers = "0.2" FilePathsBase = "0.9.13" -IOCapture = "0.2.3" PyMNE = "0.2.2" julia = "1.6" [extras] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" FilePathsBase = "48062228-2e41-5def-b9a4-89aafe57970f" -IOCapture = "b5f81e59-6552-4d32-b1f0-c071b021bf89" PyMNE = "6c5003b2-cbe8-491c-a0d1-70088e6a0fd6" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["Accessors", "FilePathsBase", "IOCapture", "PyMNE", "Test"] +test = ["Accessors", "FilePathsBase", "PyMNE", "Test"] diff --git a/test/runtests.jl b/test/runtests.jl index 7c4e6d5..8421181 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,7 +6,7 @@ using FilePathsBase using Test using PyMNE using Accessors -using IOCapture + ##### ##### Testing utilities ##### @@ -41,12 +41,8 @@ function mne_read(edf) # Check we can load it and do something with it py = PyMNE.io.read_raw_edf(tmpfile; verbose=false) py.load_data(; verbose=false) - collect(py.annotations) - results = IOCapture.capture() do - return py.describe() - end - @test !results.error - @test contains(results.output, "RawEDF | test.edf") + data = pyconvert(Array, py.get_data()) + @test data isa Matrix return py end From e06d13fc24a4579af1cfba78719ae16cf58ea998 Mon Sep 17 00:00:00 2001 From: Alex Arslan Date: Thu, 4 Apr 2024 17:35:22 -0700 Subject: [PATCH 7/7] Fix formatting Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- test/runtests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/runtests.jl b/test/runtests.jl index 5783696..450ecbb 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -203,7 +203,7 @@ const DATADIR = joinpath(@__DIR__, "data") @test_logs((:warn, "Number of data records in 
file header does not match " * "file size. Skipping 1 truncated data record(s)."), - EDF.read(truncated_file)) + EDF.read(truncated_file)) edf = EDF.read(joinpath(DATADIR, full_file)) truncated_edf = EDF.read(truncated_file) for field in fieldnames(EDF.FileHeader)
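
For reference, the net effect of this series on the new MNE round-trip test is summarized below as a plain Julia sketch. It is assembled from the hunks above (the final `mne_read` helper from PATCH 6/7, which drops the IOCapture-based check, and the "Exports readable by MNE" testset added in PATCH 1/7), not taken verbatim from any single patch; it assumes the test file's `DATADIR` constant and a PythonCall-backed PyMNE that provides `pyconvert`, as used in the patched test environment.

using EDF, PyMNE, Accessors, Test

# Round-trip helper in its final form: write the in-memory EDF.File to a
# temporary file, re-read it with MNE-Python via PyMNE, and check that the
# sample data converts to a Julia Matrix.
function mne_read(edf)
    tmpfile = joinpath(mktempdir(), "test.edf")
    EDF.write(tmpfile, edf)
    py = PyMNE.io.read_raw_edf(tmpfile; verbose=false)
    py.load_data(; verbose=false)
    data = pyconvert(Array, py.get_data())
    @test data isa Matrix
    return py
end

@testset "Exports readable by MNE" begin
    edf = EDF.read(joinpath(DATADIR, "test_float_extrema.edf"))
    @test edf.signals[1].header.digital_minimum ≈ -32767.0f0
    # Accessors.@set rebuilds the immutable signal header with a widened
    # digital minimum before re-exporting and re-reading through MNE.
    edf = @set edf.signals[1].header.digital_minimum = -32767 * 2
    py = mne_read(edf)
    @test isempty(py.annotations)
end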