Compare commits

35 Commits
v0.0.4 ... main

Author SHA1 Message Date
e7c7fca697 added buffer argument to OfSeq and OfSeq2 2024-09-09 10:44:32 +02:00
29e60825bc fixed OfMap, OfSeq, OfSeq2 2024-09-09 10:34:38 +02:00
d9d7a1586e OfSeq and OfSeq2 introduced 2024-09-06 13:49:16 +02:00
87bc148570 more filter functions 2024-05-14 20:26:58 +02:00
78436e629c FlatMap function introduced 2024-05-10 20:32:41 +02:00
4612cb74b2 refactored result wrapper func names 2024-04-04 19:13:43 +02:00
faf75185ae refactor 2024-04-04 19:11:33 +02:00
3dfcee4ee4 fixed Get and GetUnsafe methods for Result struct 2024-04-04 19:07:52 +02:00
6fcd709cd3 fixed cancel call in FindFirstAndCancel 2024-01-10 22:00:20 +01:00
35264315fd ToSliceContinuous implemented 2023-04-14 19:05:27 +02:00
9e62bab91e WriteIntoWriter handles error values 2023-03-25 11:52:57 +01:00
e9fdfd6e42 FilterResults fixed 2023-03-25 11:49:59 +01:00
a864cb930d FilterResults implemented 2023-03-25 11:45:55 +01:00
b602502992 FilterSuccess implemented 2023-03-25 11:37:08 +01:00
984aae2553 Result implemented 2023-03-25 11:12:36 +01:00
2db4225251 fixed Map function to wait until all values were mapped before closing channel 2023-03-06 13:03:10 +01:00
ac5678bde6 changed Map implementation to not preserve input order. MapPreserveOrder implemented 2023-03-06 12:55:24 +01:00
f0a869b409 fixed naming schemes for Each methods 2023-03-04 11:24:42 +01:00
169f978e14 fixed argument order 2023-01-16 15:15:21 +01:00
3a80460c18 more FindFirst variants 2023-01-16 15:13:07 +01:00
ba2efffd8e HasAny introduced 2022-12-19 18:07:43 +01:00
79805d1fdc FindFirstAndFlush flushes source concurrently 2022-12-19 18:03:43 +01:00
a85ba0011d Find operations added 2022-12-19 18:01:38 +01:00
19959a6c01 efficiency of toMap improved by concurrently calling the mapper function; ToMapSuccessive introduced which still uses old behavior 2022-12-17 19:50:41 +01:00
Timon Ringwald 0a42c8cf0a fixed bugs in TeeMany 2022-08-28 15:28:33 +02:00
Timon Ringwald 81d06f550c fixed bug in FlatChan 2022-08-21 21:22:16 +02:00
Timon Ringwald 4e422b4243 flat functions 2022-08-21 21:20:47 +02:00
Timon Ringwald 608e828f82 moved to git.milar.in 2022-08-03 21:34:08 +02:00
Timon Ringwald 7e7234cf8f renamed MapParallel to Map, renamed Map to MapSuccessive 2022-08-02 17:31:02 +02:00
Timon Ringwald 110846d866 added Filter method 2022-07-05 21:30:19 +02:00
Timon Ringwald 944ab39e28 added OfMap 2022-04-25 20:04:14 +02:00
Timon Ringwald 892814aa5c use max channel capacity instead of average 2022-04-25 15:37:00 +02:00
Timon Ringwald 55c3c7a464 various optimizations 2022-03-30 15:49:24 +02:00
Timon Ringwald 220f4e6525 fixed LimitedRunner behavior if runnable panics 2022-03-30 15:23:47 +02:00
Timon Ringwald 15e4d83ab9 ToMap refactored 2022-03-09 14:55:11 +01:00
15 changed files with 456 additions and 77 deletions


@ -21,9 +21,12 @@ func WriteIntoDelayed[T any](ch chan<- T, delay time.Duration, values ...T) {
// WriteIntoWriter reads all values from ch and writes them via fmt.Fprintln to all writers
func WriteIntoWriter[T any](ch <-chan T, writers ...io.Writer) {
-    for value := range ch {
-        for _, w := range writers {
-            fmt.Fprintln(w, value)
+    w := io.MultiWriter(writers...)
+    EachSuccessive(ch, func(value T) {
+        if err, ok := any(value).(error); ok {
+            fmt.Fprintln(w, err.Error())
+            return
         }
-    }
+        fmt.Fprintln(w, value)
+    })
}
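Usage sketch (illustrative, not part of the diff): with the change above, all writers are combined into a single io.MultiWriter and error values are printed via err.Error(). Assuming the module path from go.mod below:

package main

import (
    "errors"
    "os"

    "git.milar.in/milarin/channel"
)

func main() {
    ch := make(chan any, 2)
    ch <- "hello"
    ch <- errors.New("boom") // printed through the new error branch
    close(ch)

    // Blocks until ch is closed; writes each value on its own line.
    channel.WriteIntoWriter(ch, os.Stdout)
}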

each.go (new file, 34 lines added)

@ -0,0 +1,34 @@
package channel

import "sync"

// Each consumes all values and calls f for each of them.
// It blocks until source is closed
func Each[T any](source <-chan T, f func(T)) {
    EachWithRunner(source, getDefaultRunner(), f)
}

// EachWithRunner consumes all values and calls f for each of them, using runner to spawn its routines.
// It blocks until source is closed
func EachWithRunner[T any](source <-chan T, runner Runner, f func(T)) {
    wg := &sync.WaitGroup{}

    for value := range source {
        value := value
        wg.Add(1)
        runner.Run(func() {
            defer wg.Done()
            f(value)
        })
    }

    wg.Wait()
}

// EachSuccessive consumes all values and calls f for each of them.
// It blocks until source is closed
func EachSuccessive[T any](source <-chan T, f func(T)) {
    for value := range source {
        f(value)
    }
}
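Usage sketch (illustrative, not part of the diff): Each hands every value to the default runner, which is capped at runtime.NumCPU goroutines (see internal_stuff.go below), so the callback may run concurrently and shared state needs synchronization; EachSuccessive calls f inline in a single goroutine.

package main

import (
    "fmt"
    "sync/atomic"

    "git.milar.in/milarin/channel"
)

func main() {
    src := make(chan int, 4)
    for i := 1; i <= 4; i++ {
        src <- i
    }
    close(src)

    // f runs concurrently, hence the atomic counter.
    var sum atomic.Int64
    channel.Each(src, func(v int) { sum.Add(int64(v)) })
    fmt.Println(sum.Load()) // 10
}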

filter.go (new file, 39 lines added)

@ -0,0 +1,39 @@
package channel

func FilterSuccessive[T any](source <-chan T, filter func(T) bool) <-chan T {
    out := make(chan T, cap(source))

    go func() {
        defer close(out)
        for value := range source {
            if filter(value) {
                out <- value
            }
        }
    }()

    return out
}

func Filter[T any](source <-chan T, filter func(T) bool) <-chan T {
    return FilterPreserveOrderWithRunner(source, getDefaultRunner(), filter)
}

func FilterPreserveOrderWithRunner[T any](source <-chan T, runner Runner, filter func(T) bool) <-chan T {
    type FilteredValue[T any] struct {
        Value  T
        Filter bool
    }

    mappedValues := MapPreserveOrderWithRunner(source, runner, func(value T) FilteredValue[T] {
        return FilteredValue[T]{Value: value, Filter: filter(value)}
    })

    filteredValues := FilterSuccessive(mappedValues, func(filteredValue FilteredValue[T]) bool {
        return filteredValue.Filter
    })

    return MapSuccessive(filteredValues, func(filteredValue FilteredValue[T]) T {
        return filteredValue.Value
    })
}
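Note on the construction above (not part of the diff): Filter evaluates the predicate concurrently but keeps input order by first tagging every value with its predicate result via MapPreserveOrderWithRunner and only then dropping the tagged-out values. A minimal usage sketch, assuming the module path from go.mod:

package main

import (
    "fmt"

    "git.milar.in/milarin/channel"
)

func main() {
    src := make(chan int, 5)
    for i := 1; i <= 5; i++ {
        src <- i
    }
    close(src)

    // Predicate calls may run in parallel; output order still matches input order.
    evens := channel.Filter(src, func(v int) bool { return v%2 == 0 })
    for v := range evens {
        fmt.Println(v) // 2, 4
    }
}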

find.go (new file, 38 lines added)

@ -0,0 +1,38 @@
package channel

import "context"

func FindFirst[T any](source <-chan T) *T {
    for v := range source {
        return &v
    }
    return nil
}

func FindFirstAndCancel[T any](source <-chan T, cancel context.CancelFunc) *T {
    defer cancel()

    for v := range source {
        return &v
    }
    return nil
}

func FindLast[T any](source <-chan T) *T {
    var last *T = new(T)
    found := false

    for v := range source {
        *last = v
        found = true
    }

    if !found {
        return nil
    }
    return last
}

func HasAny[T any](source <-chan T) bool {
    return FindFirst(source) != nil
}
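Usage sketch (illustrative, not part of the diff): FindFirst returns a pointer to the first value, or nil if the channel closes without delivering one; it does not drain the remaining values. FindFirstAndCancel additionally calls the given context.CancelFunc so an upstream producer can stop.

package main

import (
    "fmt"

    "git.milar.in/milarin/channel"
)

func main() {
    src := make(chan string, 2)
    src <- "first"
    src <- "second"
    close(src)

    if v := channel.FindFirst(src); v != nil {
        fmt.Println(*v) // "first"
    }
}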

flat.go (new file, 46 lines added)

@ -0,0 +1,46 @@
package channel

func FlatSlice[T any](source <-chan []T) <-chan T {
    out := make(chan T, cap(source))

    go func() {
        defer close(out)
        for slice := range source {
            for _, v := range slice {
                out <- v
            }
        }
    }()

    return out
}

func FlatMap[K comparable, V, T any](source <-chan map[K]V, unmapper func(key K, value V) T) <-chan T {
    out := make(chan T, cap(source))

    go func() {
        defer close(out)
        for slice := range source {
            for k, v := range slice {
                out <- unmapper(k, v)
            }
        }
    }()

    return out
}

func FlatChan[T any](source <-chan <-chan T) <-chan T {
    out := make(chan T, cap(source))

    go func() {
        defer close(out)
        for ch := range source {
            for v := range ch {
                out <- v
            }
        }
    }()

    return out
}
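Usage sketch (illustrative, not part of the diff): the Flat functions unpack a channel of containers into a channel of their elements.

package main

import (
    "fmt"

    "git.milar.in/milarin/channel"
)

func main() {
    batches := make(chan []int, 2)
    batches <- []int{1, 2}
    batches <- []int{3}
    close(batches)

    // FlatSlice yields 1, 2, 3; FlatMap and FlatChan work analogously.
    for v := range channel.FlatSlice(batches) {
        fmt.Println(v)
    }
}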

go.mod (4 lines changed)

@ -1,3 +1,3 @@
-module git.tordarus.net/Tordarus/channel
+module git.milar.in/milarin/channel

-go 1.18
+go 1.23
internal_stuff.go (new file, 12 lines added)

@ -0,0 +1,12 @@
package channel

import "runtime"

type mapEntry[K comparable, V any] struct {
    Key   K
    Value V
}

func getDefaultRunner() Runner {
    return NewLimitedRunner(runtime.NumCPU())
}


@ -22,7 +22,7 @@ func NewLimitedRunner(routineLimit int) *LimitedRunner {
func (r *LimitedRunner) Run(f func()) {
    r.limiter <- struct{}{}
    go func() {
+        defer func() { <-r.limiter }()
        f()
-        <-r.limiter
    }()
}
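With this fix, the limiter token is released in a defer, so a slot is returned even when the runnable panics. A usage sketch (illustrative, not part of the diff) that bounds concurrency explicitly instead of relying on the default runner:

package main

import (
    "fmt"

    "git.milar.in/milarin/channel"
)

func main() {
    src := make(chan int, 8)
    for i := 0; i < 8; i++ {
        src <- i
    }
    close(src)

    // At most 4 callbacks run at once; Run blocks while all slots are taken.
    runner := channel.NewLimitedRunner(4)
    channel.EachWithRunner(src, runner, func(v int) {
        fmt.Println(v * v)
    })
}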

map.go (47 lines changed)

@ -1,13 +1,15 @@
package channel

-// MapParallel applies mapper to all I's coming from in and sends their return values to out while conserving input order.
-// All mappings will be done as concurrently as possible
-func MapParallel[I, O any](source <-chan I, mapper func(I) O) (out <-chan O) {
-    return MapParallelWithRunner(source, NewUnlimitedRunner(), mapper)
+import "sync"
+
+// MapPreserveOrder applies mapper to all I's coming from source and sends their return values to out while preserving input order.
+// All mappings will be done as concurrently as possible using as many threads as there are CPU cores
+func MapPreserveOrder[I, O any](source <-chan I, mapper func(I) O) (out <-chan O) {
+    return MapPreserveOrderWithRunner(source, getDefaultRunner(), mapper)
}

-// MapParallelWithRunner behaves like MapParallel but uses runner to spawn its routines
-func MapParallelWithRunner[I, O any](source <-chan I, runner Runner, mapper func(I) O) <-chan O {
+// MapPreserveOrderWithRunner behaves like MapPreserveOrder but uses runner to spawn its routines
+func MapPreserveOrderWithRunner[I, O any](source <-chan I, runner Runner, mapper func(I) O) <-chan O {
    out := make(chan O, cap(source))
    outchannels := make(chan chan O, cap(source))

@ -39,9 +41,38 @@ func MapParallelWithRunner[I, O any](source <-chan I, runner Runner, mapper func
    return out
}

-// Map applies mapper to all I's coming from in and sends their return values to out while conserving input order.
-// All mappings will be done successively
+// Map applies mapper to all I's coming from source and sends their return values to out.
+// All mappings will be done as concurrently as possible using as many threads as there are CPU cores
func Map[I, O any](source <-chan I, mapper func(I) O) <-chan O {
+    return MapWithRunner(source, getDefaultRunner(), mapper)
+}
+
+// MapWithRunner behaves like Map but uses runner to spawn its routines
+func MapWithRunner[I, O any](source <-chan I, runner Runner, mapper func(I) O) <-chan O {
    out := make(chan O, cap(source))

    go func() {
+        defer close(out)
+        wg := &sync.WaitGroup{}
+
+        for value := range source {
+            value := value
+            wg.Add(1)
+            runner.Run(func() {
+                defer wg.Done()
+                out <- mapper(value)
+            })
+        }
+
+        wg.Wait()
+    }()
+
+    return out
+}
+
+// MapSuccessive applies mapper to all I's coming from source and sends their return values to out while preserving input order.
+// All mappings will be done successively in a single thread
+func MapSuccessive[I, O any](source <-chan I, mapper func(I) O) <-chan O {
+    out := make(chan O, cap(source))
+
+    go func() {
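Overview of the renames above (not part of the diff): Map now runs the mapper concurrently without preserving order, MapPreserveOrder is the concurrent order-preserving variant (formerly MapParallel), and MapSuccessive maps in a single goroutine (formerly Map). A minimal sketch of the new Map, assuming the module path from go.mod:

package main

import (
    "fmt"
    "strconv"

    "git.milar.in/milarin/channel"
)

func main() {
    src := make(chan int, 3)
    src <- 1
    src <- 2
    src <- 3
    close(src)

    // Concurrent mapping; switch to MapPreserveOrder or MapSuccessive if order matters.
    for s := range channel.Map(src, strconv.Itoa) {
        fmt.Println(s)
    }
}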

of.go (46 lines changed)

@ -2,6 +2,7 @@ package channel

import (
    "context"
+    "iter"
    "time"
)

@ -51,3 +52,48 @@ func OfFunc[T any](ctx context.Context, buffer int, f func() T) <-chan T {
    return out
}
+
+// OfMap returns a channel containing the return values of the unmapper function
+// applied to any key-value pair in m
+// The order is random
+func OfMap[K comparable, V, T any](m map[K]V, unmapper func(K, V) T) <-chan T {
+    out := make(chan T, len(m))
+    go func() {
+        defer close(out)
+        for k, v := range m {
+            out <- unmapper(k, v)
+        }
+    }()
+    return out
+}
+
+// OfSeq returns a channel containing all values provided by the iterator
+func OfSeq[T any](seq iter.Seq[T], buffer int) <-chan T {
+    out := make(chan T, buffer)
+    go func() {
+        defer close(out)
+        for v := range seq {
+            out <- v
+        }
+    }()
+    return out
+}
+
+// OfSeq2 returns a channel containing the return values of the unmapper function
+// when provided with the values of the iterator
+func OfSeq2[K comparable, V, T any](seq iter.Seq2[K, V], buffer int, unmapper func(K, V) T) <-chan T {
+    out := make(chan T, buffer)
+    go func() {
+        defer close(out)
+        for key, value := range seq {
+            out <- unmapper(key, value)
+        }
+    }()
+    return out
+}
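Usage sketch (illustrative, not part of the diff): OfSeq and OfSeq2 bridge Go 1.23 iterators into channels; the buffer argument sets the capacity of the returned channel and the unmapper decides how each key-value pair is represented.

package main

import (
    "fmt"
    "maps"

    "git.milar.in/milarin/channel"
)

func main() {
    m := map[string]int{"a": 1, "b": 2}

    ch := channel.OfSeq2(maps.All(m), len(m), func(k string, v int) string {
        return fmt.Sprintf("%s=%d", k, v)
    })
    for s := range ch {
        fmt.Println(s) // "a=1" and "b=2" in random order
    }
}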

result.go (new file, 102 lines added)

@ -0,0 +1,102 @@
package channel

type Result[T any] struct {
    value *T
    err   error
}

func ResultOf[T any](value T, err error) Result[T] {
    if err != nil {
        return Result[T]{value: nil, err: err}
    }
    return Result[T]{value: &value, err: nil}
}

func WrapResultOutputFunc[I, O any](f func(I) (O, error)) func(I) Result[O] {
    return func(i I) Result[O] { return ResultOf(f(i)) }
}

func WrapResultFunc[I, O any](f func(I) (O, error)) func(Result[I]) Result[O] {
    resFunc := WrapResultOutputFunc(f)
    nilValue := *new(O)

    return func(r Result[I]) Result[O] {
        v, err := r.Get()
        if err != nil {
            return ResultOf(nilValue, err)
        }
        return resFunc(v)
    }
}

func (r Result[T]) Success() bool {
    return r.err == nil
}

func (r Result[T]) Fail() bool {
    return !r.Success()
}

func (r Result[T]) GetOrDefault(defaultValue T) T {
    if r.Fail() {
        return defaultValue
    }
    return *r.value
}

func (r Result[T]) Get() (T, error) {
    if r.err != nil {
        return *new(T), r.err
    }
    return *r.value, r.err
}

func (r Result[T]) GetUnsafe() T {
    if r.err != nil {
        panic(r.err)
    }
    return *r.value
}

func (r Result[T]) Err() error {
    return r.err
}

func FilterSuccess[T any](source <-chan Result[T]) <-chan T {
    succeeded := Filter(source, Result[T].Success)
    return MapSuccessive(succeeded, func(r Result[T]) T {
        v, _ := r.Get()
        return v
    })
}

func FilterFail[T any](source <-chan Result[T]) <-chan T {
    failed := Filter(source, Result[T].Fail)
    return MapSuccessive(failed, func(r Result[T]) T {
        v, _ := r.Get()
        return v
    })
}

func FilterResults[T any](source <-chan Result[T]) (succeeded <-chan T, failed <-chan error) {
    succ := make(chan T, cap(source))
    fail := make(chan error, cap(source))

    go func() {
        defer close(succ)
        defer close(fail)

        for r := range source {
            if r.Fail() {
                fail <- r.Err()
                continue
            }
            succ <- r.GetUnsafe()
        }
    }()

    return succ, fail
}
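Usage sketch (illustrative, not part of the diff): Result carries either a value or an error through a pipeline; WrapResultOutputFunc adapts a (value, error) function, and FilterResults splits successes and failures into separate channels.

package main

import (
    "fmt"
    "strconv"

    "git.milar.in/milarin/channel"
)

func main() {
    raw := make(chan string, 3)
    raw <- "1"
    raw <- "oops"
    raw <- "3"
    close(raw)

    results := channel.MapSuccessive(raw, channel.WrapResultOutputFunc(strconv.Atoi))
    ok, errs := channel.FilterResults(results)

    done := make(chan struct{})
    go func() {
        defer close(done)
        for err := range errs {
            fmt.Println("failed:", err)
        }
    }()
    for n := range ok {
        fmt.Println("parsed:", n)
    }
    <-done
}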

tee.go (new file, 37 lines added)

@ -0,0 +1,37 @@
package channel

// Tee returns 2 channels which both receive all values from source.
// It's basically a copy function for channels
func Tee[T any](source <-chan T) (<-chan T, <-chan T) {
    outs := TeeMany(source, 2)
    return outs[0], outs[1]
}

// TeeMany returns a given amount of channels which all receive all values from source.
// It's basically a copy function for channels
func TeeMany[T any](source <-chan T, amount int) []<-chan T {
    outputs := make([]chan T, amount)
    for i := range outputs {
        outputs[i] = make(chan T, cap(source))
    }

    go func() {
        defer func() {
            for _, out := range outputs {
                close(out)
            }
        }()

        for value := range source {
            for _, out := range outputs {
                out <- value
            }
        }
    }()

    readOnlyOutputs := make([]<-chan T, 0, amount)
    for _, out := range outputs {
        readOnlyOutputs = append(readOnlyOutputs, out)
    }

    return readOnlyOutputs
}
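Usage sketch (illustrative, not part of the diff): the copier goroutine inside TeeMany blocks whenever one output buffer is full, so every returned channel should be consumed.

package main

import (
    "fmt"

    "git.milar.in/milarin/channel"
)

func main() {
    src := make(chan int, 2)
    src <- 1
    src <- 2
    close(src)

    a, b := channel.Tee(src)

    done := make(chan struct{})
    go func() {
        defer close(done)
        for v := range a {
            fmt.Println("a:", v)
        }
    }()
    for v := range b {
        fmt.Println("b:", v)
    }
    <-done
}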


@ -3,7 +3,8 @@ package channel
import "time"

// CloseOnTimeout returns a channel which receives all values from the source.
-// If no value was received in the given timeout duration, the returned channel will be closed
+// If no value was received in the given timeout duration, the returned channel will be closed.
+// The input channel will not be closed.
func CloseOnTimeout[T any](source <-chan T, timeout time.Duration) <-chan T {
    output := make(chan T, cap(source))
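Usage sketch (illustrative, not part of the diff; the behavior after the timeout is described as in the doc comment above): the returned channel closes once no value arrives within the timeout, while the source channel itself stays open.

package main

import (
    "fmt"
    "time"

    "git.milar.in/milarin/channel"
)

func main() {
    slow := make(chan int)
    go func() {
        slow <- 1
        // no further values: the timeout below will close the output
    }()

    out := channel.CloseOnTimeout(slow, 200*time.Millisecond)
    for v := range out {
        fmt.Println(v) // prints 1; the loop ends roughly 200ms after the last value
    }
}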

to.go (60 lines changed)

@ -5,16 +5,32 @@ import "container/list"

// ToSlice returns a slice containing all values read from ch
func ToSlice[T any](ch <-chan T) []T {
    s := make([]T, 0, cap(ch))
-    Each(ch, func(value T) { s = append(s, value) })
+    EachSuccessive(ch, func(value T) { s = append(s, value) })
    return s
}

+// ToSliceContinuous returns a slice containing all values read from ch.
+// The returned slice will be a pointer slice to a continuous block of memory.
+// All values will be copied.
+func ToSliceContinuous[T any](ch <-chan *T) []*T {
+    values := make([]T, 0, cap(ch))
+    pointers := make([]*T, 0, cap(ch))
+    EachSuccessive(ch, func(value *T) {
+        pointers = append(pointers, value)
+        if value != nil {
+            values = append(values, *value)
+        }
+    })
+    return pointers
+}
+
// ToSliceDeref returns a slice containing all values read from ch.
// The returned slice will be a dereferenced and continuous block of memory.
// Nil pointers are ignored.
func ToSliceDeref[T any](ch <-chan *T) []T {
    s := make([]T, 0, cap(ch))
-    Each(ch, func(value *T) {
+    EachSuccessive(ch, func(value *T) {
        if value != nil {
            s = append(s, *value)
        }

@ -25,28 +41,44 @@ func ToSliceDeref[T any](ch <-chan *T) []T {

// ToList returns a list.List containing all values read from ch
func ToList[T any](ch <-chan T) *list.List {
    l := list.New()
-    Each(ch, func(value T) { l.PushBack(value) })
+    EachSuccessive(ch, func(value T) { l.PushBack(value) })
    return l
}

// ToMap returns a map containing all values read from ch.
-// The map keys are determined by f
-func ToMap[K comparable, V any](ch <-chan V, f func(V) K) map[K]V {
-    m := map[K]V{}
-    Each(ch, func(value V) { m[f(value)] = value })
-    return m
+// The map key-value pairs are determined by f which will be called as concurrently as possible
+// to build the resulting map
+func ToMap[T any, K comparable, V any](ch <-chan T, f func(T) (K, V)) map[K]V {
+    return ToMapWithRunner(ch, getDefaultRunner(), f)
}

-// ToKeyMap returns a map containing all values read from ch as keys.
-// The map values are determined by f
-func ToKeyMap[K comparable, V any](ch <-chan K, f func(K) V) map[K]V {
+// ToMapWithRunner returns a map containing all values read from ch.
+// The map key-value pairs are determined by f which will be called as concurrently as possible
+// to build the resulting map
+func ToMapWithRunner[T any, K comparable, V any](ch <-chan T, runner Runner, f func(T) (K, V)) map[K]V {
+    map2entry := func(t T) mapEntry[K, V] {
+        k, v := f(t)
+        return mapEntry[K, V]{Key: k, Value: v}
+    }
+    map2kv := func(e mapEntry[K, V]) (K, V) { return e.Key, e.Value }
+    return ToMapSuccessive(MapWithRunner(ch, runner, map2entry), map2kv)
+}
+
+// ToMapSuccessive returns a map containing all values read from ch.
+// The map key-value pairs are determined by f
+func ToMapSuccessive[T any, K comparable, V any](ch <-chan T, f func(T) (K, V)) map[K]V {
    m := map[K]V{}
-    Each(ch, func(key K) { m[key] = f(key) })
+    EachSuccessive(ch, func(value T) {
+        k, v := f(value)
+        m[k] = v
+    })
    return m
}

// ToStructMap returns a struct{} map containing all values read from ch as keys.
-// It is a shorthand for ToKeyMap(ch, func(k K) struct{} { return struct{}{} })
+// It is a shorthand for ToMap(ch, func(value T) (T, struct{}) { return value, struct{}{} })
func ToStructMap[T comparable](ch <-chan T) map[T]struct{} {
-    return ToKeyMap(ch, func(key T) struct{} { return struct{}{} })
+    return ToMap(ch, func(value T) (T, struct{}) { return value, struct{}{} })
}
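Usage sketch (illustrative, not part of the diff): the new ToMap takes a single function that returns the key-value pair and may call it concurrently; ToMapSuccessive keeps the old single-goroutine behavior.

package main

import (
    "fmt"
    "strings"

    "git.milar.in/milarin/channel"
)

func main() {
    words := make(chan string, 3)
    words <- "alpha"
    words <- "beta"
    words <- "gamma"
    close(words)

    lengths := channel.ToMap(words, func(w string) (string, int) {
        return strings.ToUpper(w), len(w)
    })
    fmt.Println(lengths) // map[ALPHA:5 BETA:4 GAMMA:5]
}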


@ -5,53 +5,11 @@ func determineBufferSize[T any](channels []<-chan T) int {
        return 0
    }

-    bufSize := 0
+    maxBufSize := 0
    for _, ch := range channels {
-        bufSize += cap(ch)
+        if cap(ch) > maxBufSize {
+            maxBufSize = cap(ch)
+        }
    }

-    return bufSize / len(channels)
-}
-
-// Flush consumes all values and discards them immediately.
-// It blocks until all sources are closed
-func Flush[T any](sources ...<-chan T) {
-    for range Merge(sources...) {
-    }
-}
-
-// Each consumes all values and calls f for each of them.
-// It blocks until all sources are closed
-func Each[T any](source <-chan T, f func(T)) {
-    for value := range source {
-        f(value)
-    }
-}
-
-// Tee returns 2 channels which both receive all values from source.
-// Its basically a copy function for channels
-func Tee[T any](source <-chan T) (<-chan T, <-chan T) {
-    outs := TeeMany(source, 2)
-    return outs[0], outs[1]
-}
-
-// TeeMany returns a given amount of channels which all receive all values from source.
-// Its basically a copy function for channels
-func TeeMany[T any](source <-chan T, amount int) []<-chan T {
-    outs := make([]chan T, amount)
-
-    go func() {
-        defer func() {
-            for _, out := range outs {
-                close(out)
-            }
-        }()
-
-        for value := range source {
-            for _, out := range outs {
-                out <- value
-            }
-        }
-    }()
-
-    return (interface{}(outs)).([]<-chan T)
+    return maxBufSize
}