109 stmt.string(2) = digest.toString |
109 stmt.string(2) = digest.toString |
110 }) |
110 }) |
111 } |
111 } |
112 |
112 |
/** Delete the stored heap entry for `session_name`.
  *
  * Runs inside `Data.transaction_lock` with `create = true` so the backing
  * tables are set up on first use; `synchronized = true` presumably also
  * serializes access on the JVM side — see `Data.transaction_lock`.
  */
def clean_entry(db: SQL.Database, session_name: String): Unit =
  Data.transaction_lock(db, create = true, synchronized = true) { Data.clean_entry(db, session_name) }
115 |
117 |
/** Look up the stored digest for `session_name`, or `None` if absent.
  *
  * Uses `Data.transaction_lock` with `create = true` (tables are created on
  * demand) and `synchronized = true` — NOTE(review): assumed to add JVM-level
  * serialization on top of the DB transaction; confirm in `Data.transaction_lock`.
  */
def get_entry(db: SQL.Database, session_name: String): Option[SHA1.Digest] =
  Data.transaction_lock(db, create = true, synchronized = true) { Data.get_entry(db, session_name) }
118 |
122 |
119 def store( |
123 def store( |
120 database: Option[SQL.Database], |
124 database: Option[SQL.Database], |
121 session_name: String, |
125 session_name: String, |
122 heap: Path, |
126 heap: Path, |
131 |
135 |
132 val slices = (size.toDouble / slice.toDouble).ceil.toInt |
136 val slices = (size.toDouble / slice.toDouble).ceil.toInt |
133 val step = (size.toDouble / slices.toDouble).ceil.toLong |
137 val step = (size.toDouble / slices.toDouble).ceil.toLong |
134 |
138 |
135 try { |
139 try { |
136 Data.transaction_lock(db, create = true) { Data.prepare_entry(db, session_name) } |
140 Data.transaction_lock(db, create = true, synchronized = true) { |
|
141 Data.prepare_entry(db, session_name) |
|
142 } |
137 |
143 |
138 for (i <- 0 until slices) { |
144 for (i <- 0 until slices) { |
139 val j = i + 1 |
145 val j = i + 1 |
140 val offset = step * i |
146 val offset = step * i |
141 val limit = if (j < slices) step * j else size |
147 val limit = if (j < slices) step * j else size |
142 val content = |
148 val content = |
143 Bytes.read_file(heap.file, offset = offset, limit = limit) |
149 Bytes.read_file(heap.file, offset = offset, limit = limit) |
144 .compress(cache = cache) |
150 .compress(cache = cache) |
145 Data.transaction_lock(db) { Data.write_entry(db, session_name, i, content) } |
151 Data.transaction_lock(db, synchronized = true) { |
|
152 Data.write_entry(db, session_name, i, content) |
|
153 } |
146 } |
154 } |
147 |
155 |
148 Data.transaction_lock(db) { Data.finish_entry(db, session_name, size, digest) } |
156 Data.transaction_lock(db, synchronized = true) { |
|
157 Data.finish_entry(db, session_name, size, digest) |
|
158 } |
149 } |
159 } |
150 catch { case exn: Throwable => |
160 catch { case exn: Throwable => |
151 Data.transaction_lock(db, create = true) { Data.clean_entry(db, session_name) } |
161 Data.transaction_lock(db, create = true, synchronized = true) { |
|
162 Data.clean_entry(db, session_name) |
|
163 } |
152 throw exn |
164 throw exn |
153 } |
165 } |
154 } |
166 } |
155 digest |
167 digest |
156 } |
168 } |
162 cache: Compress.Cache = Compress.Cache.none |
174 cache: Compress.Cache = Compress.Cache.none |
163 ): Unit = { |
175 ): Unit = { |
164 database match { |
176 database match { |
165 case None => |
177 case None => |
166 case Some(db) => |
178 case Some(db) => |
167 Data.transaction_lock(db, create = true) { |
179 Data.transaction_lock(db, create = true, synchronized = true) { |
168 val db_digest = Data.get_entry(db, session_name) |
180 val db_digest = Data.get_entry(db, session_name) |
169 val file_digest = read_file_digest(heap) |
181 val file_digest = read_file_digest(heap) |
170 |
182 |
171 if (db_digest.isDefined && db_digest != file_digest) { |
183 if (db_digest.isDefined && db_digest != file_digest) { |
172 Isabelle_System.make_directory(heap.expand.dir) |
184 Isabelle_System.make_directory(heap.expand.dir) |