//go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_dotlk) || sqlite3_flock

package vfs

import (
	"context"
	"errors"
	"io"
	"io/fs"
	"os"
	"sync"

	"github.com/tetratelabs/wazero/api"
	"golang.org/x/sys/unix"

	"github.com/ncruces/go-sqlite3/internal/util"
)

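// vfsShmParent is a shared-memory file open in this process.
// It is shared by all connections to the same file, because POSIX
// advisory locks are per-process: closing any descriptor for the
// file would drop every lock the process holds on it.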
type vfsShmParent struct {
	*os.File
	info os.FileInfo

	refs int // +checklocks:vfsShmListMtx

	lock [_SHM_NLOCK]int8 // +checklocks:Mutex
	sync.Mutex
}

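// vfsShmList tracks every shared-memory file open in this process,
// so connections to the same file can share a single vfsShmParent.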
var (
	// +checklocks:vfsShmListMtx
	vfsShmList    []*vfsShmParent
	vfsShmListMtx sync.Mutex
)

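// vfsShm is one connection's view of a shared-memory file:
// the locks it holds and the regions it has mapped.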
type vfsShm struct {
	*vfsShmParent
	path    string
	lock    [_SHM_NLOCK]bool
	regions []*util.MappedRegion
}

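// Close drops this connection's locks and its reference to the shared
// file, closing the OS file once the last reference is released.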
func (s *vfsShm) Close() error {
	if s.vfsShmParent == nil {
		return nil
	}

	vfsShmListMtx.Lock()
	defer vfsShmListMtx.Unlock()

	// Unlock everything.
	s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)

	// Decrease reference count.
	if s.vfsShmParent.refs > 0 {
		s.vfsShmParent.refs--
		s.vfsShmParent = nil
		return nil
	}

	err := s.File.Close()
	for i, g := range vfsShmList {
		if g == s.vfsShmParent {
			vfsShmList[i] = nil
			s.vfsShmParent = nil
			return err
		}
	}
	panic(util.AssertErr())
}

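// shmOpen opens the shared-memory file, attaching to an existing
// vfsShmParent for the same file if one exists, and running the
// dead man's switch protocol to decide whether the file must be
// truncated to a fresh, uninitialized state.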
func (s *vfsShm) shmOpen() (rc _ErrorCode) {
	if s.vfsShmParent != nil {
		return _OK
	}

	vfsShmListMtx.Lock()
	defer vfsShmListMtx.Unlock()

	// Stat file without opening it.
	// Closing it would release all POSIX locks on it.
	fi, err := os.Stat(s.path)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return _IOERR_FSTAT
	}

	// Find a shared file, increase the reference count.
	for _, g := range vfsShmList {
		if g != nil && os.SameFile(fi, g.info) {
			s.vfsShmParent = g
			g.refs++
			return _OK
		}
	}

	// Always open file read-write, as it will be shared.
	f, err := os.OpenFile(s.path,
		os.O_RDWR|os.O_CREATE|_O_NOFOLLOW, 0666)
	if err != nil {
		return _CANTOPEN
	}
	defer func() {
		if rc != _OK {
			f.Close()
		}
	}()

	// Dead man's switch.
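	// The DMS is a read lock on the _SHM_DMS byte, held by every
	// process that has the file open. If a write lock can be acquired
	// instead, no other process is using the file, and it is safe to
	// truncate it back to an empty, uninitialized state.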
	if lock, rc := osTestLock(f, _SHM_DMS, 1); rc != _OK {
		return _IOERR_LOCK
	} else if lock == unix.F_WRLCK {
		return _BUSY
	} else if lock == unix.F_UNLCK {
		if rc := osWriteLock(f, _SHM_DMS, 1); rc != _OK {
			return rc
		}
		if err := f.Truncate(0); err != nil {
			return _IOERR_SHMOPEN
		}
	}
	if rc := osReadLock(f, _SHM_DMS, 1); rc != _OK {
		return rc
	}

	fi, err = f.Stat()
	if err != nil {
		return _IOERR_FSTAT
	}

	// Add the new shared file.
	s.vfsShmParent = &vfsShmParent{
		File: f,
		info: fi,
	}
	for i, g := range vfsShmList {
		if g == nil {
			vfsShmList[i] = s.vfsShmParent
			return _OK
		}
	}
	vfsShmList = append(vfsShmList, s.vfsShmParent)
	return _OK
}

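// shmMap maps region id of the shared-memory file, size bytes long,
// into the module's memory. If the file is too small and extend is
// false, it returns a zero pointer without growing the file.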
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (ptr_t, _ErrorCode) {
	// Ensure size is a multiple of the OS page size.
	if int(size)&(unix.Getpagesize()-1) != 0 {
		return 0, _IOERR_SHMMAP
	}

	if rc := s.shmOpen(); rc != _OK {
		return 0, rc
	}

	// Check if file is big enough.
	o, err := s.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, _IOERR_SHMSIZE
	}
	if n := (int64(id) + 1) * int64(size); n > o {
		if !extend {
			return 0, _OK
		}
		if osAllocate(s.File, n) != nil {
			return 0, _IOERR_SHMSIZE
		}
	}

	r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, false)
	if err != nil {
		return 0, _IOERR_SHMMAP
	}
	s.regions = append(s.regions, r)
	return r.Ptr, _OK
}

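// shmLock acquires/releases shared-memory locks in the range
// [offset, offset+n). Locks are first reconciled with other
// connections in this process; only transitions that change the
// process-wide state are forwarded to the OS as file locks.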
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
	s.Lock()
	defer s.Unlock()

	// Check if we can obtain/release locks locally.
	rc := s.shmMemLock(offset, n, flags)
	if rc != _OK {
		return rc
	}

	// Obtain/release the appropriate file locks.
	switch {
	case flags&_SHM_UNLOCK != 0:
		// Releasing a shared lock decrements the counter,
		// but may leave parts of the range still locked.
		begin, end := offset, offset+n
		for i := begin; i < end; i++ {
			if s.vfsShmParent.lock[i] != 0 {
				if i > begin {
					rc |= osUnlock(s.File, _SHM_BASE+int64(begin), int64(i-begin))
				}
				begin = i + 1
			}
		}
		if end > begin {
			rc |= osUnlock(s.File, _SHM_BASE+int64(begin), int64(end-begin))
		}
		return rc
	case flags&_SHM_SHARED != 0:
		// Acquiring a new shared lock on the file is only necessary
		// if there was a new shared lock in the range.
		for i := offset; i < offset+n; i++ {
			if s.vfsShmParent.lock[i] == 1 {
				rc = osReadLock(s.File, _SHM_BASE+int64(offset), int64(n))
				break
			}
		}
	case flags&_SHM_EXCLUSIVE != 0:
		// Acquiring an exclusive lock on the file is always necessary.
		rc = osWriteLock(s.File, _SHM_BASE+int64(offset), int64(n))
	default:
		panic(util.AssertErr())
	}

	// Release the local locks we had acquired.
	if rc != _OK {
		s.shmMemLock(offset, n, flags^(_SHM_UNLOCK|_SHM_LOCK))
	}
	return rc
}

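// shmUnmap unmaps all mapped regions and closes the shared-memory
// file, removing it from the file system if delete is set.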
func (s *vfsShm) shmUnmap(delete bool) {
	if s.vfsShmParent == nil {
		return
	}

	// Unmap regions.
	for _, r := range s.regions {
		r.Unmap()
	}
	s.regions = nil

	// Close the file.
	if delete {
		os.Remove(s.path)
	}
	s.Close()
}

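// shmBarrier provides the memory barrier SQLite expects from
// xShmBarrier, by acquiring and immediately releasing the mutex.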
func (s *vfsShm) shmBarrier() {
	s.Lock()
	//lint:ignore SA2001 memory barrier.
	s.Unlock()
}