-
Notifications
You must be signed in to change notification settings - Fork 434
Expand file tree
/
Copy pathcn1_globals.m
More file actions
1552 lines (1387 loc) · 66.8 KB
/
cn1_globals.m
File metadata and controls
1552 lines (1387 loc) · 66.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#include "cn1_globals.h"
#include <assert.h>
#include <unistd.h>
#include "java_lang_Class.h"
#include "java_lang_Object.h"
#include "java_lang_Boolean.h"
#include "java_lang_String.h"
#include "java_lang_Integer.h"
#include "java_lang_Byte.h"
#include "java_lang_Short.h"
#include "java_lang_Character.h"
#include "java_lang_Thread.h"
#include "java_lang_Long.h"
#include "java_lang_Double.h"
#include "java_lang_Float.h"
#include "java_lang_Runnable.h"
#include "java_lang_System.h"
// --- runtime state flags ---
// Set when the OS reports memory pressure; tightens the EDT aggressive-allocator rule in codenameOneGCMark().
JAVA_BOOLEAN lowMemoryMode = JAVA_FALSE;
// Counter of allocations performed while suspended -- written elsewhere; not read in this file chunk.
int mallocWhileSuspended = 0;
// Whether the app is currently backgrounded/suspended.
BOOL isAppSuspended = NO;
#include "java_lang_ArrayIndexOutOfBoundsException.h"
// CN1_LOG: platform-neutral logging -- NSLog on Apple/Objective-C builds, printf elsewhere.
// The mach headers are pulled in for get_free_memory() below.
#if defined(__APPLE__) && defined(__OBJC__)
#import <mach/mach.h>
#import <mach/mach_host.h>
#define CN1_LOG(fmt, ...) NSLog(@fmt, ##__VA_ARGS__)
#else
#include <time.h>
#include <stdio.h>
#define CN1_LOG(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)
#endif
// The amount of memory allocated between GC cycle checks (generally 30 seconds)
// that triggers "High-frequency" GC mode. When "High-frequency" mode is triggered,
// it will only wait 200ms before triggering another GC cycle after completing the
// previous one. Normally it's 30 seconds.
// This value is in bytes
long CN1_HIGH_FREQUENCY_ALLOCATION_THRESHOLD = 1024 * 1024;
// "High frequency" GC mode won't be enabled until the "total" allocated memory
// in the app reaches this threshold
// This value is in bytes
long CN1_HIGH_FREQUENCY_ALLOCATION_ACTIVATED_THRESHOLD = 10 * 1024 * 1024;
// The number of allocations (not measured in bytes, but actual allocation count) made on
// a thread that will result in the thread being treated as an aggressive allocator.
// If, during GC, it hits a thread that is an aggressive allocator, GC will lock that thread
// until the sweep is complete for all threads. Normally, the thread is only locked while
// its objects are being marked.
// If the EDT is hitting this threshold, we'll have problems.
// Both values below are recomputed from free memory in init_gc_thresholds().
long CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD = 5000;
long CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD_EDT = 10000;
// The max number of allocations (not bytes, but number) on a thread before
// it will refuse to increase its size. This is checked when allocating objects.
// If the thread is at its max size during allocation, it will aggressively call
// the GC and wait until the GC is complete before the allocation can occur.
// On the EDT, this has usability consequences.
// If the allocation array is maxed out, but hasn't reached this max size,
// it will double the size of the allocation array and trigger a GC (but not wait
// for the GC to complete).
long CN1_MAX_HEAP_SIZE = 10000;
// Special value for the EDT to possibly allow for a larger allocation stack on the
// EDT.
long CN1_MAX_HEAP_SIZE_EDT = 10000;
// THE THREAD ID OF THE EDT. We'll treat the EDT specially. -1 until the EDT registers itself.
long CN1_EDT_THREAD_ID = -1;
// A flag to indicate if the GC thresholds are initialized yet
// @see init_gc_thresholds
static JAVA_BOOLEAN GC_THRESHOLDS_INITIALIZED = JAVA_FALSE;
// Incremented at the start of every mark phase; objects carrying the current value are live.
int currentGcMarkValue = 1;
// NOTE(review): redundant extern -- lowMemoryMode is defined above in this same file.
extern JAVA_BOOLEAN lowMemoryMode;
// Returns JAVA_TRUE when the given thread id belongs to the EDT (event dispatch thread).
static JAVA_BOOLEAN isEdt(long threadId) {
    return threadId == CN1_EDT_THREAD_ID;
}
// Returns the amount of free memory in the system, in bytes.
// On Apple/Objective-C builds this queries the Mach VM statistics; other
// builds get a fixed 100MB stub so the GC heuristics still have a figure.
static long get_free_memory(void)
{
#if defined(__APPLE__) && defined(__OBJC__)
    mach_port_t host = mach_host_self();
    vm_size_t pageSize;
    host_page_size(host, &pageSize);
    vm_statistics_data_t stats;
    mach_msg_type_number_t count = sizeof(vm_statistics_data_t) / sizeof(integer_t);
    if (host_statistics(host, HOST_VM_INFO, (host_info_t)&stats, &count) != KERN_SUCCESS)
    {
        NSLog(@"Failed to fetch vm statistics");
        return 0;
    }
    /* free_count is in pages; convert to bytes */
    long bytesFree = stats.free_count * pageSize;
    return bytesFree;
#else
    return 1024 * 1024 * 100; // Stub: 100MB
#endif
}
// Initializes the GC tuning thresholds from the device's free memory.
// Runs lazily from the GC mark method. Earlier versions hardcoded these
// values, which wasted the greater capacity of newer devices.
static void init_gc_thresholds() {
    if (GC_THRESHOLDS_INITIALIZED) {
        return;
    }
    GC_THRESHOLDS_INITIALIZED = JAVA_TRUE;
    // On iPhone X, this generally starts with a figure like 388317184 (i.e. ~380 MB)
    long freemem = get_free_memory();
    // Crude empirical estimate of the average allocation size: most allocations
    // are 32 bytes, a com.codename1.ui.Container is ~900 bytes. Used to estimate
    // how many allocations can be made on a thread before we need to worry.
    long avgAllocSize = 128;
    // Rough number of allocation slots available in all of memory
    // (~38000 on an iPhone X).
    long maxAllocationSlots = freemem / avgAllocSize;
    // A thread passing this allocation count is an "aggressive allocator" and is
    // held until the sweep completes; other threads are only paused during mark().
    // The EDT is treated specially below.
    CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD = maxAllocationSlots / 3;
    if (CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD < 5000) {
        CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD = 5000;
    }
    // For the EDT we experiment with (effectively) never declaring it aggressive --
    // we don't want to block it -- unless a low memory warning was received.
    CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD_EDT = maxAllocationSlots * 10;
    // NOTE: the high-frequency GC thresholds (CN1_HIGH_FREQUENCY_ALLOCATION_*)
    // deliberately keep their compile-time defaults; dynamic tuning
    // (freemem/5 and freemem/2 with 1MB/10MB floors) was tried and disabled.
    // GC is triggered if the number of allocations on any thread reaches this
    // threshold. It is checked during malloc, so that if we try to allocate and
    // the count exceeds this threshold, the thread is stopped until a GC cycle
    // completes.
    CN1_MAX_HEAP_SIZE = maxAllocationSlots / 3;
    if (CN1_MAX_HEAP_SIZE < 10000) {
        CN1_MAX_HEAP_SIZE = 10000;
    }
    // This might be a bit permissive (allowing the EDT to grow to the total
    // max allocation slots) but there are other safeguards in place that should
    // mitigate the harm done.
    CN1_MAX_HEAP_SIZE_EDT = maxAllocationSlots;
    if (CN1_MAX_HEAP_SIZE_EDT < 10000) {
        CN1_MAX_HEAP_SIZE_EDT = 10000;
    }
}
//#define DEBUG_GC_OBJECTS_IN_HEAP
// ---------------------------------------------------------------------------
// Class-metadata singletons for primitive array types (1 to 3 dimensions).
// Each initializer is positional against struct clazz (declared in
// cn1_globals.h; field meanings are not visible in this file -- confirm there).
// Observable pattern: the 2-D/3-D variants install &gcMarkArrayObject in the
// slot that every 1-D variant leaves 0 -- presumably because a 1-D primitive
// array contains no heap references for the GC to traverse (TODO confirm
// against the struct clazz declaration).
// ---------------------------------------------------------------------------
struct clazz class_array1__JAVA_BOOLEAN = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_BOOLEAN, "boolean[]", JAVA_TRUE, 1, &class__java_lang_Boolean, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_BOOLEAN = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_BOOLEAN, "boolean[]", JAVA_TRUE, 2, &class__java_lang_Boolean, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_BOOLEAN = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_BOOLEAN, "boolean[]", JAVA_TRUE, 3, &class__java_lang_Boolean, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_CHAR = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_CHAR, "char[]", JAVA_TRUE, 1, &class__java_lang_Character, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_CHAR = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_CHAR, "char[]", JAVA_TRUE, 2, &class__java_lang_Character, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_CHAR = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_CHAR, "char[]", JAVA_TRUE, 3, &class__java_lang_Character, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_BYTE = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_BYTE, "byte[]", JAVA_TRUE, 1, &class__java_lang_Byte, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_BYTE = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_BYTE, "byte[]", JAVA_TRUE, 2, &class__java_lang_Byte, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_BYTE = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_BYTE, "byte[]", JAVA_TRUE, 3, &class__java_lang_Byte, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_SHORT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_SHORT, "short[]", JAVA_TRUE, 1, &class__java_lang_Short, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_SHORT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_SHORT, "short[]", JAVA_TRUE, 2, &class__java_lang_Short, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_SHORT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_SHORT, "short[]", JAVA_TRUE, 3, &class__java_lang_Short, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_INT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_INT, "int[]", JAVA_TRUE, 1, &class__java_lang_Integer, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_INT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_INT, "int[]", JAVA_TRUE, 2, &class__java_lang_Integer, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_INT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_INT, "int[]", JAVA_TRUE, 3, &class__java_lang_Integer, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_LONG = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_LONG, "long[]", JAVA_TRUE, 1, &class__java_lang_Long, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_LONG = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_LONG, "long[]", JAVA_TRUE, 2, &class__java_lang_Long, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_LONG = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_LONG, "long[]", JAVA_TRUE, 3, &class__java_lang_Long, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_FLOAT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_FLOAT, "float[]", JAVA_TRUE, 1, &class__java_lang_Float, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array2__JAVA_FLOAT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_FLOAT, "float[]", JAVA_TRUE, 2, &class__java_lang_Float, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_FLOAT = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_FLOAT, "float[]", JAVA_TRUE, 3, &class__java_lang_Float, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array1__JAVA_DOUBLE = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, 0, 0, cn1_array_1_id_JAVA_DOUBLE, "double[]", JAVA_TRUE, 1, &class__java_lang_Double, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
// Minimal stubs for String array classes so clean-target builds that only
// reference primitive wrappers still have constant pool backing storage.
struct clazz class_array1__java_lang_String = {0};
struct clazz class_array2__java_lang_String = {0};
struct clazz class_array2__JAVA_DOUBLE = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_2_id_JAVA_DOUBLE, "double[]", JAVA_TRUE, 2, &class__java_lang_Double, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct clazz class_array3__JAVA_DOUBLE = {
DEBUG_GC_INIT 0, 999999, 0, 0, 0, 0, 0, 0, &gcMarkArrayObject, 0, cn1_array_3_id_JAVA_DOUBLE, "double[]", JAVA_TRUE, 3, &class__java_lang_Double, JAVA_TRUE, &class__java_lang_Object, EMPTY_INTERFACES, 0, 0, 0
};
struct elementStruct* pop(struct elementStruct** sp) {
--(*sp);
return *sp;
}
// Pops 'count' JVM stack slots off the operand stack pointed to by *SP.
// Category-2 values (long/double) occupy two slots but one elementStruct,
// so they consume 2 from 'count' per element popped.
void popMany(CODENAME_ONE_THREAD_STATE, int count, struct elementStruct** SP) {
    while (count > 0) {
        struct elementStruct* top = --(*SP);
        javaTypes poppedType = top->type;
        count -= (poppedType == CN1_TYPE_DOUBLE || poppedType == CN1_TYPE_LONG) ? 2 : 1;
    }
}
// Backing storage for the VM constant pool (marked as roots in codenameOneGCMark);
// allocated elsewhere -- presumably during VM startup (confirm in the initializer).
JAVA_OBJECT* constantPoolObjects = 0;
struct elementStruct* BC_DUP2_X2_DD(struct elementStruct* SP) {
(*SP).data.l = SP[-1].data.l;
SP[-1].data.l = SP[-2].data.l;
SP[-2].data.l = (*SP).data.l;
(*SP).type = SP[-1].type;
SP[-1].type = SP[-2].type;
SP[-2].type = (*SP).type;
return (struct elementStruct*)(SP+1);
}
struct elementStruct* BC_DUP2_X2_DSS(struct elementStruct* SP) {
SP[0].data.l = SP[-1].data.l;
SP[-1].data.l = SP[-2].data.l;
SP[-2].data.l = SP[-3].data.l;
SP[-3].data.l = SP[0].data.l;
SP[0].type = SP[-1].type;
SP[-1].type = SP[-2].type;
SP[-2].type = SP[-3].type;
SP[-3].type = SP[0].type;
return SP+1;
}
struct elementStruct* BC_DUP2_X2_SSD(struct elementStruct* SP) {
SP[1].data.l = SP[-1].data.l;
SP[0].data.l = SP[-2].data.l;
SP[-1].data.l = SP[-3].data.l;
SP[-2].data.l = SP[1].data.l;
SP[-3].data.l = SP[0].data.l;
SP[1].type = SP[-1].type;
SP[0].type = SP[-2].type;
SP[-1].type = SP[-3].type;
SP[-2].type = SP[1].type;
SP[-3].type = SP[0].type;
return SP+2;
}
struct elementStruct* BC_DUP2_X2_SSSS(struct elementStruct* SP) {
SP[1].data.l = SP[-1].data.l;
SP[0].data.l = SP[-2].data.l;
SP[-1].data.l = SP[-3].data.l;
SP[-2].data.l = SP[-4].data.l;
SP[-3].data.l = SP[1].data.l;
SP[-4].data.l = SP[0].data.l;
SP[1].type = SP[-1].type;
SP[0].type = SP[-2].type;
SP[-1].type = SP[-3].type;
SP[-2].type = SP[-4].type;
SP[-3].type = SP[1].type;
SP[-4].type = SP[0].type;
return SP+2;
}
struct elementStruct* BC_DUP_X2_SD(struct elementStruct* SP) {
SP[0].data.l = SP[-1].data.l;
SP[-1].data.l = SP[-2].data.l;
SP[-2].data.l = SP[0].data.l;
SP[0].type = SP[-1].type;
SP[-1].type = SP[-2].type;
SP[-2].type = SP[0].type;
return SP+1;
}
struct elementStruct* BC_DUP_X2_SSS(struct elementStruct* SP) {
SP[0].data.l = SP[-1].data.l;
SP[-1].data.l = SP[-2].data.l;
SP[-2].data.l = SP[-3].data.l;
SP[-3].data.l = SP[0].data.l;
SP[0].type = SP[-1].type;
SP[-1].type = SP[-2].type;
SP[-2].type = SP[-3].type;
SP[-3].type = SP[0].type;
return SP+1;
}
// Runtime implementation of the Java instanceof/checkcast test.
// sourceClass: class id of the value's actual class; destId: id of the type
// being tested against. Returns JAVA_TRUE/JAVA_FALSE.
// Ids >= cn1_array_start_offset denote array types; within that range, ids
// >= cn1_array_start_offset+100 denote reference (object) arrays, and the
// %3 / /3 arithmetic maps an array id to its dimension (1..3) and component
// type id -- layout inferred from the arithmetic; confirm against the id
// generator that emits these constants.
// Changes vs. previous revision: removed the two dead debugger-hook branches
// (unused "int foo = 1;" locals) and replaced the confusing counter/pointer
// hybrid scan of classInstanceOf (counter never advanced; the pointer did)
// with a plain index loop. Logic is otherwise unchanged.
int instanceofFunction(int sourceClass, int destId) {
    if (sourceClass == destId) {
        return JAVA_TRUE;
    }
    if (sourceClass >= cn1_array_start_offset || destId >= cn1_array_start_offset) {
        // (destId instanceof sourceClass)
        // E.g. (new int[0] instanceof Object) ===> sourceClass==Object and destId=int[]
        if (sourceClass < cn1_array_start_offset) {
            // A non-array target only matches an array when it is java.lang.Object
            return sourceClass == cn1_class_id_java_lang_Object;
        } else if (destId < cn1_array_start_offset) {
            // An array type never matches a plain object type
            return JAVA_FALSE;
        }
        // At this point we know that both sourceClass and destId are array types.
        // The start offset for reference array types
        int refArrayStartOffset = cn1_array_start_offset + 100;
        if (sourceClass < refArrayStartOffset || destId < refArrayStartOffset) {
            if (sourceClass >= refArrayStartOffset) {
                // We need to deal with things like (int[][] instanceof Object[])
                int srcDim = (sourceClass - refArrayStartOffset) % 3 + 1;
                // NOTE(review): destDim uses %4 against cn1_array_start_offset while
                // srcDim uses %3+1 against refArrayStartOffset -- presumably the
                // primitive-array id layout differs; confirm against the id generator.
                int destDim = (destId - cn1_array_start_offset) % 4;
                if (srcDim < destDim) {
                    // Strip one dimension from the source and recurse
                    if (srcDim > 1) {
                        sourceClass = sourceClass - 1;
                    } else {
                        sourceClass = (sourceClass - refArrayStartOffset) / 3;
                    }
                    return instanceofFunction(sourceClass, destId - 1);
                }
            }
            // if either is primitive, then they must be the same type.
            return sourceClass == destId;
        }
        // Both are reference arrays: compare their component types recursively.
        int srcDimension = (sourceClass - refArrayStartOffset) % 3 + 1;
        int destDimension = (destId - refArrayStartOffset) % 3 + 1;
        int sourceClassComponentTypeId = srcDimension > 1 ? sourceClass - 1 : (sourceClass - refArrayStartOffset) / 3;
        int destClassComponentTypeId = destDimension > 1 ? destId - 1 : (destId - refArrayStartOffset) / 3;
        return instanceofFunction(sourceClassComponentTypeId, destClassComponentTypeId);
    }
    // Non-array case: walk the precomputed, -1 terminated compatibility table.
    int* compatibleIds = classInstanceOf[destId];
    for (int idx = 0; compatibleIds[idx] > -1; idx++) {
        if (compatibleIds[idx] == sourceClass) {
            return JAVA_TRUE;
        }
    }
    return JAVA_FALSE;
}
// Queue of objects pending release/finalization -- written elsewhere; empty here.
JAVA_OBJECT* releaseQueue = 0;
JAVA_INT releaseQueueSize = 0;
// Signature of a per-class finalizer, stored on struct clazz as finalizerFunction
// (see freeAndFinalize below).
typedef void (*finalizerFunctionPointer)(CODENAME_ONE_THREAD_STATE, JAVA_OBJECT obj);
// invokes finalizers and iterates over the release queue
// NOTE(review): intentionally a no-op in this build -- the queue is never drained here.
void flushReleaseQueue() {
}
// Runs the object's class finalizer (if its class registered one) and then
// releases the object's memory back to the GC allocator.
void freeAndFinalize(CODENAME_ONE_THREAD_STATE, JAVA_OBJECT obj) {
    finalizerFunctionPointer finalizer =
        (finalizerFunctionPointer) obj->__codenameOneParentClsReference->finalizerFunction;
    if (finalizer != 0) {
        finalizer(threadStateData, obj);
    }
    codenameOneGcFree(threadStateData, obj);
}
/**
 * Invoked to destroy an array and release all the objects within it
 * NOTE(review): intentionally a no-op in this build.
 */
void arrayFinalizerFunction(CODENAME_ONE_THREAD_STATE, JAVA_OBJECT array) {
}
// Whether a GC cycle was explicitly requested -- written elsewhere.
BOOL invokedGC = NO;
extern int findPointerPosInHeap(JAVA_OBJECT obj);
extern pthread_mutex_t* getMemoryAccessMutex();
extern long gcThreadId;
// NOTE(review): intentionally a no-op in this build.
void gcReleaseObj(JAVA_OBJECT o) {
}
// memory map of all the heap objects which we can walk over to delete/deallocate
// unused objects
JAVA_OBJECT* allObjectsInHeap = 0;
// Holds the previous table during a resize so it can be freed (see placeObjectInHeapCollection).
JAVA_OBJECT* oldAllObjectsInHeap = 0;
// Capacity of allObjectsInHeap (entries); doubled on demand.
int sizeOfAllObjectsInHeap = 30000;
// High-water mark: number of entries ever used (includes NULL holes left by the sweep).
int currentSizeOfAllObjectsInHeap = 0;
// Global mutex guarding heap bookkeeping; lazily created by getMemoryAccessMutex().
pthread_mutex_t* memoryAccessMutex = NULL;
// Returns the global memory-access mutex, creating it on first use.
// NOTE(review): the lazy initialization is not itself thread-safe -- two threads
// racing here could each create a mutex. Presumably the first call happens
// before concurrency starts; confirm with the callers. The malloc result is
// also unchecked, consistent with the rest of this file.
pthread_mutex_t* getMemoryAccessMutex() {
    if (memoryAccessMutex != NULL) {
        return memoryAccessMutex;
    }
    pthread_mutex_t* created = malloc(sizeof(pthread_mutex_t));
    pthread_mutex_init(created, NULL);
    memoryAccessMutex = created;
    return memoryAccessMutex;
}
// Returns the object's slot index within allObjectsInHeap (stored on the
// object by placeObjectInHeapCollection), or -1 for a null pointer.
int findPointerPosInHeap(JAVA_OBJECT obj) {
    return (obj == 0) ? -1 : obj->__heapPosition;
}
// this is an optimization allowing us to continue searching for available space in RAM from the previous position
// that way we avoid looping over elements that we already probably checked
int lastOffsetInRam = 0;
// Registers obj in the global heap table (allObjectsInHeap) and records its
// slot index in obj->__heapPosition so the GC can locate it later.
// Strategy: append while the table has never filled; afterwards reuse NULL
// holes (resuming the scan from lastOffsetInRam, wrapping to the start),
// and finally double the table when no hole exists.
// NOTE(review): malloc results are unchecked here, as elsewhere in this file.
void placeObjectInHeapCollection(JAVA_OBJECT obj) {
    if(allObjectsInHeap == 0) {
        // lazy first-time allocation of the (zeroed) table
        allObjectsInHeap = malloc(sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap);
        memset(allObjectsInHeap, 0, sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap);
    }
    if(currentSizeOfAllObjectsInHeap < sizeOfAllObjectsInHeap) {
        // fast path: append past the high-water mark
        allObjectsInHeap[currentSizeOfAllObjectsInHeap] = obj;
        obj->__heapPosition = currentSizeOfAllObjectsInHeap;
        currentSizeOfAllObjectsInHeap++;
    } else {
        int pos = -1;
        JAVA_OBJECT* currentAllObjectsInHeap = allObjectsInHeap;
        int currentSize = currentSizeOfAllObjectsInHeap;
        // resume the hole search from where the previous one stopped
        for(int iter = lastOffsetInRam ; iter < currentSize ; iter++) {
            if(currentAllObjectsInHeap[iter] == JAVA_NULL) {
                pos = iter;
                lastOffsetInRam = pos;
                break;
            }
        }
        if(pos < 0 && lastOffsetInRam > 0) {
            // wrap around: just make sure there is nothing at the start
            for(int iter = 0 ; iter < lastOffsetInRam ; iter++) {
                if(currentAllObjectsInHeap[iter] == JAVA_NULL) {
                    pos = iter;
                    lastOffsetInRam = pos;
                    break;
                }
            }
        }
        if(pos < 0) {
            // no hole anywhere: double the table (new upper half zeroed)
            JAVA_OBJECT* tmpAllObjectsInHeap = malloc(sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap * 2);
            memset(tmpAllObjectsInHeap + sizeOfAllObjectsInHeap, 0, sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap);
            memcpy(tmpAllObjectsInHeap, allObjectsInHeap, sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap);
            sizeOfAllObjectsInHeap *= 2;
            oldAllObjectsInHeap = allObjectsInHeap;
            allObjectsInHeap = tmpAllObjectsInHeap;
            // BUGFIX: record the slot actually used. Previously pos stayed -1 on
            // this path, so obj->__heapPosition was set to -1 below even though
            // the object was stored at currentSizeOfAllObjectsInHeap, breaking
            // findPointerPosInHeap for objects placed during a table resize.
            pos = currentSizeOfAllObjectsInHeap;
            allObjectsInHeap[pos] = obj;
            currentSizeOfAllObjectsInHeap++;
            free(oldAllObjectsInHeap);
        } else {
            allObjectsInHeap[pos] = obj;
        }
        obj->__heapPosition = pos;
    }
}
// Per-thread VM state table and kill counter, defined in the threading module.
extern struct ThreadLocalData** allThreads;
extern int nThreadsToKill;
// Set by codenameOneGCMark() when some thread must stay blocked until the sweep ends.
JAVA_BOOLEAN hasAgressiveAllocator;
// the thread just died, mark its remaining resources
// A thread has terminated: free its scratch UTF-8 buffer and migrate any
// allocations it still owns into the global heap table so the GC can manage
// (and eventually free) them.
void collectThreadResources(struct ThreadLocalData *current)
{
    if (current->utf8Buffer != 0) {
        free(current->utf8Buffer);
        current->utf8Buffer = 0;
    }
    for (int slot = 0; slot < current->heapAllocationSize; slot++) {
        JAVA_OBJECT pending = (JAVA_OBJECT)current->pendingHeapAllocations[slot];
        if (pending == 0) {
            continue;
        }
        current->pendingHeapAllocations[slot] = 0;
        placeObjectInHeapCollection(pending);
    }
}
/**
 * A simple concurrent mark algorithm that traverses the currently running threads.
 * For each live thread it: marks the Thread object itself, waits for lightweight
 * (VM-managed) threads to pause, migrates the thread's pending allocations into
 * the global heap table, marks every object slot on the thread's Java stack, and
 * marks statics and the constant pool as roots. Threads classified as "aggressive
 * allocators" are left blocked (threadBlockedByGC stays set) until the sweep ends.
 */
void codenameOneGCMark() {
// bump the mark value: objects stamped with the current value survive the next sweep
currentGcMarkValue++;
init_gc_thresholds();
hasAgressiveAllocator = JAVA_FALSE;
struct ThreadLocalData* d = getThreadLocalData();
//int marked = 0;
// copy the allocated objects from already deleted threads so we can delete that data
//NSLog(@"GC mark, %d dead processes pending",nThreadsToKill);
for(int iter = 0 ; iter < NUMBER_OF_SUPPORTED_THREADS ; iter++) {
// only the slot read is under the critical section; t is then used unlocked
lockCriticalSection();
struct ThreadLocalData* t = allThreads[iter];
unlockCriticalSection();
if(t != 0) {
if(t->currentThreadObject != JAVA_NULL) {
gcMarkObject(t, t->currentThreadObject, JAVA_FALSE);
}
// NOTE(review): everything below is skipped for the GC's own thread (t == d),
// including migration of its pending allocations -- presumably handled elsewhere; confirm.
if(t != d) {
// NOTE(review): 'objects' is never used in this function
struct elementStruct* objects = t->threadObjectStack;
// wait for the thread to pause so we can traverse its stack but not for native threads where
// we don't have much control and who barely call into Java anyway
if(t->lightweightThread) {
t->threadBlockedByGC = JAVA_TRUE;
int totalwait = 0; // microseconds of waiting accumulated so far
long now = time(0); // wall clock, in seconds
while(t->threadActive) {
usleep(500);
totalwait += 500;
// every 10ms of waiting, check how long we have been stuck
if((totalwait%10000)==0)
{ long later = time(0)-now;
// NOTE(review): time(0) is in seconds, so this fires only after ~10000
// seconds, and the "/1000" below divides a seconds value again -- the
// stall-log math looks off by a factor of 1000; confirm intended units.
if(later>10000)
{
CN1_LOG("GC trapped for %d seconds waiting for thread %d in slot %d (%d)",
(int)(later/1000),(int)t->threadId,iter,t->threadKilled);
}
}
}
}
// place allocations from the local thread into the global heap list
if (!t->lightweightThread) {
// For native threads, we need to actually lock them while we traverse the
// heap allocations because we can't use the usual locking mechanisms on
// them.
lockThreadHeapMutex();
}
for(int heapTrav = 0 ; heapTrav < t->heapAllocationSize ; heapTrav++) {
JAVA_OBJECT obj = (JAVA_OBJECT)t->pendingHeapAllocations[heapTrav];
if(obj) {
t->pendingHeapAllocations[heapTrav] = 0;
placeObjectInHeapCollection(obj);
}
}
if (!t->lightweightThread) {
unlockThreadHeapMutex();
}
// this is a thread that allocates a lot and might demolish RAM. We will hold it until the sweep is finished...
JAVA_INT allocSize = t->heapAllocationSize;
JAVA_BOOLEAN agressiveAllocator = JAVA_FALSE;
// the EDT gets its own (much larger) threshold unless we are under memory pressure
if (isEdt(t->threadId) && !lowMemoryMode) {
agressiveAllocator = allocSize > CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD_EDT;
} else {
agressiveAllocator = allocSize > CN1_AGRESSIVE_ALLOCATOR_THREAD_HEAP_ALLOCATIONS_THRESHOLD;
}
// blocking the EDT hurts usability, so log whenever it happens
if (CN1_EDT_THREAD_ID == t->threadId && agressiveAllocator) {
long freeMemory = get_free_memory();
CN1_LOG("[GC] Blocking EDT as aggressive allocator, free memory=%ld", freeMemory);
}
t->heapAllocationSize = 0;
// mark every live object slot currently on this thread's Java stack
int stackSize = t->threadObjectStackOffset;
for(int stackIter = 0 ; stackIter < stackSize ; stackIter++) {
struct elementStruct* current = &t->threadObjectStack[stackIter];
CODENAME_ONE_ASSERT(current->type >= CN1_TYPE_INVALID && current->type <= CN1_TYPE_PRIMITIVE);
if(current != 0 && current->type == CN1_TYPE_OBJECT && current->data.o != JAVA_NULL) {
gcMarkObject(t, current->data.o, JAVA_FALSE);
//marked++;
}
}
// NOTE(review): statics are re-marked once per traversed thread, always with the
// GC thread's own state 'd' -- likely redundant work; confirm whether this was
// meant to run once, after the loop.
markStatics(d);
if(!agressiveAllocator) {
t->threadBlockedByGC = JAVA_FALSE;
} else {
// keep this thread blocked until the sweep completes
hasAgressiveAllocator = JAVA_TRUE;
}
}
}
}
//NSLog(@"Mark set %i objects to %i", marked, currentGcMarkValue);
// since they are immutable this probably doesn't need as much sync as the statics...
for(int iter = 0 ; iter < CN1_CONSTANT_POOL_SIZE ; iter++) {
gcMarkObject(d, (JAVA_OBJECT)constantPoolObjects[iter], JAVA_TRUE);
}
}
#ifdef DEBUG_GC_OBJECTS_IN_HEAP
// Running total of bytes currently allocated on the VM heap (debug builds only).
int totalAllocatedHeap = 0;
// Returns the allocation size recorded in the int header stored immediately
// before the object pointer (debug builds only).
int getObjectSize(JAVA_OBJECT o) {
    int* header = (int*)o;
    return header[-1];
}
// Pre-sweep snapshots, filled by preSweepCount() and compared against post-sweep
// figures in printObjectsPostSweep(). Arrays are indexed by class id;
// cn1_array_3_id_java_util_Vector is assumed to be the last id in use.
int classTypeCountPreSweep[cn1_array_3_id_java_util_Vector + 1];
int sizeInHeapForTypePreSweep[cn1_array_3_id_java_util_Vector + 1];
int nullSpacesPreSweep = 0;
int preSweepRam;
void preSweepCount(CODENAME_ONE_THREAD_STATE) {
preSweepRam = totalAllocatedHeap;
memset(classTypeCountPreSweep, 0, sizeof(int) * cn1_array_3_id_java_util_Vector + 1);
memset(sizeInHeapForTypePreSweep, 0, sizeof(int) * cn1_array_3_id_java_util_Vector + 1);
int t = currentSizeOfAllObjectsInHeap;
int nullSpacesPreSweep = 0;
for(int iter = 0 ; iter < t ; iter++) {
JAVA_OBJECT o = allObjectsInHeap[iter];
if(o != JAVA_NULL) {
classTypeCountPreSweep[o->__codenameOneParentClsReference->classId]++;
sizeInHeapForTypePreSweep[o->__codenameOneParentClsReference->classId] += getObjectSize(o);
} else {
nullSpacesPreSweep++;
}
}
}
// Debug-build helper: after a sweep, log per-class instance counts/sizes and the
// deltas against the preSweepCount() snapshot (instances cleaned, bytes saved).
void printObjectsPostSweep(CODENAME_ONE_THREAD_STATE) {
#if defined(__APPLE__) && defined(__OBJC__)
    NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
#endif
    // cn1_array_3_id_java_util_Vector is assumed to be the last class id in use
    int classTypeCount[cn1_array_3_id_java_util_Vector + 1];
    int sizeInHeapForType[cn1_array_3_id_java_util_Vector + 1];
    // BUGFIX: the memsets and the malloc below used "sizeof(x) * N + 1", which
    // by operator precedence undersized the cleared/allocated regions; the
    // malloc in particular was sizeof(char*)-1 bytes short, so writing slot N
    // of arrayOfNames overflowed the heap buffer.
    memset(classTypeCount, 0, sizeof classTypeCount);
    memset(sizeInHeapForType, 0, sizeof sizeInHeapForType);
    int nullSpaces = 0;
    const char** arrayOfNames = malloc(sizeof(char*) * (cn1_array_3_id_java_util_Vector + 1));
    memset(arrayOfNames, 0, sizeof(char*) * (cn1_array_3_id_java_util_Vector + 1));
    int t = currentSizeOfAllObjectsInHeap;
    for(int iter = 0 ; iter < t ; iter++) {
        JAVA_OBJECT o = allObjectsInHeap[iter];
        if(o != JAVA_NULL) {
            classTypeCount[o->__codenameOneParentClsReference->classId]++;
            sizeInHeapForType[o->__codenameOneParentClsReference->classId] += getObjectSize(o);
            // array classes have no constant-pool name entry, so remember a
            // printable name from the first instance we encounter
            if(o->__codenameOneParentClsReference->classId > cn1_array_start_offset) {
                if(arrayOfNames[o->__codenameOneParentClsReference->classId] == 0) {
                    arrayOfNames[o->__codenameOneParentClsReference->classId] = o->__codenameOneParentClsReference->clsName;
                }
            }
        } else {
            nullSpaces++;
        }
    }
    int actualTotalMemory = 0;
    CN1_LOG("\n\n**** There are %i - %i = %i nulls available entries out of %i objects in heap which take up %i, sweep saved %i ****", nullSpaces, nullSpacesPreSweep, nullSpaces - nullSpacesPreSweep, t, totalAllocatedHeap, preSweepRam - totalAllocatedHeap);
    // BUGFIX: was "iter < cn1_array_3_id_java_util_Vector", which skipped the
    // last valid class id (the arrays are sized N + 1).
    for(int iter = 0 ; iter <= cn1_array_3_id_java_util_Vector ; iter++) {
        if(classTypeCount[iter] > 0) {
            if(classTypeCountPreSweep[iter] - classTypeCount[iter] > 0) {
                if(iter > cn1_array_start_offset) {
#if defined(__APPLE__) && defined(__OBJC__)
                    NSLog(@"There are %i instances of %@ taking up %i bytes, %i were cleaned which saved %i bytes", classTypeCount[iter], [NSString stringWithUTF8String:arrayOfNames[iter]], sizeInHeapForType[iter], classTypeCountPreSweep[iter] - classTypeCount[iter], sizeInHeapForTypePreSweep[iter] - sizeInHeapForType[iter]);
#endif
                } else {
                    JAVA_OBJECT str = STRING_FROM_CONSTANT_POOL_OFFSET(classNameLookup[iter]);
#if defined(__APPLE__) && defined(__OBJC__)
                    NSLog(@"There are %i instances of %@ taking up %i bytes, %i were cleaned which saved %i bytes", classTypeCount[iter], toNSString(threadStateData, str), sizeInHeapForType[iter], classTypeCountPreSweep[iter] - classTypeCount[iter], sizeInHeapForTypePreSweep[iter] - sizeInHeapForType[iter]);
#endif
                }
            }
            actualTotalMemory += sizeInHeapForType[iter];
        }
    }
    CN1_LOG("**** GC cycle complete ****");
    free(arrayOfNames);
#if defined(__APPLE__) && defined(__OBJC__)
    [pool release];
#endif
}
/**
 * Debug utility: walks the global object heap and logs, per class id, the
 * instance count, the percentage of all live objects, the bytes occupied and
 * the percentage of total allocated RAM. Array classes print the clsName
 * captured from the class struct; regular classes resolve their name through
 * the constant pool. Only compiled in under DEBUG_GC_OBJECTS_IN_HEAP.
 */
void printObjectTypesInHeap(CODENAME_ONE_THREAD_STATE) {
#if defined(__APPLE__) && defined(__OBJC__)
    NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
#endif
    // cn1_array_3_id_java_util_Vector is assumed to be the last class id in
    // use, so every per-class table needs (cn1_array_3_id_java_util_Vector + 1)
    // slots.
    int classTypeCount[cn1_array_3_id_java_util_Vector + 1];
    int sizeInHeapForType[cn1_array_3_id_java_util_Vector + 1];
    // BUGFIX: the element count must be parenthesized. Without the parentheses
    // the expression parses as (sizeof(int) * N) + 1 bytes, which left the last
    // table entry uncleared and under-allocated arrayOfNames below by
    // sizeof(char*) - 1 bytes (a potential heap overflow for the top class id).
    memset(classTypeCount, 0, sizeof(int) * (cn1_array_3_id_java_util_Vector + 1));
    memset(sizeInHeapForType, 0, sizeof(int) * (cn1_array_3_id_java_util_Vector + 1));
    int nullSpaces = 0;
    const char** arrayOfNames = malloc(sizeof(char*) * (cn1_array_3_id_java_util_Vector + 1));
    memset(arrayOfNames, 0, sizeof(char*) * (cn1_array_3_id_java_util_Vector + 1));
    int t = currentSizeOfAllObjectsInHeap;
    for(int iter = 0 ; iter < t ; iter++) {
        JAVA_OBJECT o = allObjectsInHeap[iter];
        if(o != JAVA_NULL) {
            // tally the instance count and byte footprint for this class id
            classTypeCount[o->__codenameOneParentClsReference->classId]++;
            sizeInHeapForType[o->__codenameOneParentClsReference->classId] += getObjectSize(o);
            if(o->__codenameOneParentClsReference->classId > cn1_array_start_offset) {
                // array classes: lazily capture a printable name from the
                // first instance encountered
                if(arrayOfNames[o->__codenameOneParentClsReference->classId] == 0) {
                    arrayOfNames[o->__codenameOneParentClsReference->classId] = o->__codenameOneParentClsReference->clsName;
                }
            }
        } else {
            // vacant slot in the heap table
            nullSpaces++;
        }
    }
    int actualTotalMemory = 0;
    CN1_LOG("There are %i null available entries out of %i objects in heap which take up %i", nullSpaces, t, totalAllocatedHeap);
    for(int iter = 0 ; iter < cn1_array_3_id_java_util_Vector ; iter++) {
        if(classTypeCount[iter] > 0) {
            // f = percentage of live object count, f2 = percentage of total RAM
            float f = ((float)classTypeCount[iter]) / ((float)t) * 100.0f;
            float f2 = ((float)sizeInHeapForType[iter]) / ((float)totalAllocatedHeap) * 100.0f;
            if(iter > cn1_array_start_offset) {
#if defined(__APPLE__) && defined(__OBJC__)
                NSLog(@"There are %i instances of %@ which is %i percent its %i bytes which is %i mem percent", classTypeCount[iter], [NSString stringWithUTF8String:arrayOfNames[iter]], (int)f, sizeInHeapForType[iter], (int)f2);
#endif
            } else {
                // regular (non-array) classes resolve their name via the
                // constant pool string table
                JAVA_OBJECT str = STRING_FROM_CONSTANT_POOL_OFFSET(classNameLookup[iter]);
#if defined(__APPLE__) && defined(__OBJC__)
                NSLog(@"There are %i instances of %@ which is %i percent its %i bytes which is %i mem percent", classTypeCount[iter], toNSString(threadStateData, str), (int)f, sizeInHeapForType[iter], (int)f2);
#endif
            }
            actualTotalMemory += sizeInHeapForType[iter];
        }
    }
    CN1_LOG("Actual ram = %i vs total mallocs = %i", actualTotalMemory, totalAllocatedHeap);
    free(arrayOfNames);
#if defined(__APPLE__) && defined(__OBJC__)
    [pool release];
#endif
}
#endif
/**
* The sweep GC phase iterates the memory block and deletes unmarked memory
* since it always runs from the same thread and concurrent work doesn't matter
* it can just delete everything it finds
*/
/**
 * The sweep GC phase iterates the memory block and deletes unmarked memory
 * since it always runs from the same thread and concurrent work doesn't matter
 * it can just delete everything it finds.
 *
 * An object survives the sweep when its gc mark is within one cycle of
 * currentGcMarkValue; an object whose mark is -1 (the value assigned at
 * allocation time in codenameOneGcMalloc) is stamped with the current mark
 * instead of being collected. Under DEBUG_GC_OBJECTS_IN_HEAP the function also
 * logs per-class heap statistics before and after the sweep.
 */
void codenameOneGCSweep() {
struct ThreadLocalData* threadStateData = getThreadLocalData();
#ifdef DEBUG_GC_OBJECTS_IN_HEAP
// snapshot per-class counts so the post-sweep report can show what was freed
preSweepCount(threadStateData);
#endif
//int counter = 0;
int t = currentSizeOfAllObjectsInHeap;
for(int iter = 0 ; iter < t ; iter++) {
JAVA_OBJECT o = allObjectsInHeap[iter];
if(o != JAVA_NULL) {
// mark == -1 means freshly allocated and not yet traversed by the
// marker; see the else branch below
if(o->__codenameOneGcMark != -1) {
// anything not marked during the current or previous mark cycle
// is unreachable and can be reclaimed
if(o->__codenameOneGcMark < currentGcMarkValue - 1) {
CODENAME_ONE_ASSERT(o->__codenameOneGcMark > 0);
allObjectsInHeap[iter] = JAVA_NULL;
//if(o->__codenameOneReferenceCount > 0) {
//    NSLog(@"Sweped %X", (int)o);
//}
#ifdef DEBUG_GC_ALLOCATIONS
// debug-only: log where the swept object was allocated and, where
// possible, a printable rendition of its contents
int classId = o->className;
#if defined(__APPLE__) && defined(__OBJC__)
NSString* whereIs;
if(classId > 0) {
whereIs = (NSString*)((struct obj__java_lang_String*)STRING_FROM_CONSTANT_POOL_OFFSET(classId))->java_lang_String_nsString;
} else {
whereIs = @"unknown";
}
if(o->__codenameOneParentClsReference->isArray) {
JAVA_ARRAY arr = (JAVA_ARRAY)o;
if(arr->__codenameOneParentClsReference == &class_array1__JAVA_CHAR) {
// char arrays: copy into a C string (narrowing each JAVA_ARRAY_CHAR)
// so the content can be printed
JAVA_ARRAY_CHAR* ch = (JAVA_ARRAY_CHAR*)arr->data;
char data[arr->length + 1];
for(int iter = 0 ; iter < arr->length ; iter++) {
data[iter] = ch[iter];
}
data[arr->length] = 0;
NSLog(@"Sweeping: %X, Mark: %i, Allocated: %@ %i type: %@, which is: '%@'", (int)o, o->__codenameOneGcMark, whereIs, o->line, [NSString stringWithUTF8String:o->__codenameOneParentClsReference->clsName], [NSString stringWithUTF8String:data]);
} else {
NSLog(@"Sweeping: %X, Mark: %i, Allocated: %@ %i , type: %@", (int)o, o->__codenameOneGcMark, whereIs, o->line, [NSString stringWithUTF8String:o->__codenameOneParentClsReference->clsName]);
}
} else {
// non-array objects: use their Java toString() for the log line
JAVA_OBJECT str = java_lang_Object_toString___R_java_lang_String(threadStateData, o);
NSString* ns = toNSString(threadStateData, str);
if(ns == nil) {
ns = @"[NULL]";
}
NSLog(@"Sweeping: %X, Mark: %i, Allocated: %@ %i , type: %@, toString: '%@'", (int)o, o->__codenameOneGcMark, whereIs, o->line, [NSString stringWithUTF8String:o->__codenameOneParentClsReference->clsName], ns);
}
#endif
#endif
// detach from the heap table first, then run finalization and free
removeObjectFromHeapCollection(threadStateData, o);
freeAndFinalize(threadStateData, o);
//counter++;
}
} else {
// newly allocated object (mark still -1): stamp it with the current
// mark so it survives until the next full mark cycle sees it
o->__codenameOneGcMark = currentGcMarkValue;
}
}
}
// we had a thread that really ripped into the GC so we only release that thread now after cleaning RAM
if(hasAgressiveAllocator) {
for(int iter = 0 ; iter < NUMBER_OF_SUPPORTED_THREADS ; iter++) {
// read the thread slot under the critical section, but flip the flag
// outside it (matches the original locking discipline)
lockCriticalSection();
struct ThreadLocalData* t = allThreads[iter];
unlockCriticalSection();
if(t != 0) {
t->threadBlockedByGC = JAVA_FALSE;
}
}
}
#ifdef DEBUG_GC_OBJECTS_IN_HEAP
//printObjectTypesInHeap(threadStateData);
printObjectsPostSweep(threadStateData);
#endif
}
/**
 * Detaches the given object from the global heap table, or — when it was never
 * promoted into that table — from the calling thread's pending-allocation
 * buffer. Returns JAVA_TRUE when the object was found and its slot cleared,
 * JAVA_FALSE when it appears in neither collection (which can happen on a
 * double deletion when the GC and the reference counter collide).
 */
JAVA_BOOLEAN removeObjectFromHeapCollection(CODENAME_ONE_THREAD_STATE, JAVA_OBJECT o) {
    // Lazily create the global heap table: the GC may run before any object
    // was ever registered here.
    if(allObjectsInHeap == 0) {
        allObjectsInHeap = malloc(sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap);
        memset(allObjectsInHeap, 0, sizeof(JAVA_OBJECT) * sizeOfAllObjectsInHeap);
    }
    int heapIndex = findPointerPosInHeap(o);
    if(heapIndex >= 0) {
        // found in the global table: clear both the back-pointer and the slot
        o->__heapPosition = -1;
        allObjectsInHeap[heapIndex] = JAVA_NULL;
        return JAVA_TRUE;
    }
    // Not in the global table; it may still sit in this thread's buffer of
    // allocations that were never flushed into the heap.
    for(int slot = 0 ; slot < threadStateData->heapAllocationSize ; slot++) {
        if(((JAVA_OBJECT)threadStateData->pendingHeapAllocations[slot]) == o) {
            threadStateData->pendingHeapAllocations[slot] = JAVA_NULL;
            return JAVA_TRUE;
        }
    }
    return JAVA_FALSE;
}
extern JAVA_BOOLEAN gcCurrentlyRunning;
// Bytes allocated since the last high-frequency-GC sample; incremented by
// codenameOneGcMalloc and reset by java_lang_System_isHighFrequencyGC___R_boolean.
int allocationsSinceLastGC = 0;
// Lifetime running total of allocated bytes (never reset).
long long totalAllocations = 0;
/**
 * Samples — and resets — the byte count allocated since the previous sample
 * and reports whether allocation pressure is high: the recent window must
 * exceed CN1_HIGH_FREQUENCY_ALLOCATION_THRESHOLD and the lifetime total must
 * exceed CN1_HIGH_FREQUENCY_ALLOCATION_ACTIVATED_THRESHOLD (so the heuristic
 * stays off early in the app's life).
 */
JAVA_BOOLEAN java_lang_System_isHighFrequencyGC___R_boolean(CODENAME_ONE_THREAD_STATE) {
    int sampledWindow = allocationsSinceLastGC;
    allocationsSinceLastGC = 0;
    // Both operands are side-effect free, so the evaluation order of the
    // conjunction doesn't matter.
    return totalAllocations > CN1_HIGH_FREQUENCY_ALLOCATION_ACTIVATED_THRESHOLD
            && sampledWindow > CN1_HIGH_FREQUENCY_ALLOCATION_THRESHOLD;
}
extern int mallocWhileSuspended;
extern BOOL isAppSuspended;
JAVA_OBJECT codenameOneGcMalloc(CODENAME_ONE_THREAD_STATE, int size, struct clazz* parent) {
if(isAppSuspended) {
mallocWhileSuspended += size;
if(mallocWhileSuspended > 100000) {
java_lang_System_startGCThread__(threadStateData);
isAppSuspended = NO;
}
}
allocationsSinceLastGC += size;
totalAllocations += size;
if(lowMemoryMode && !threadStateData->nativeAllocationMode) {
threadStateData->threadActive = JAVA_FALSE;
usleep((JAVA_INT)(1000));
while(threadStateData->threadBlockedByGC) {
usleep((JAVA_INT)(1000));
}
threadStateData->threadActive = JAVA_TRUE;
}
#ifdef DEBUG_GC_OBJECTS_IN_HEAP
totalAllocatedHeap += size;
int* ptr = (int*)malloc(size + sizeof(int));
*ptr = size;
ptr++;
JAVA_OBJECT o = (JAVA_OBJECT)ptr;
#else
JAVA_OBJECT o = (JAVA_OBJECT)malloc(size);
#endif
if(o == NULL) {
// malloc failed! We need to free up RAM FAST!
invokedGC = YES;
threadStateData->threadActive = JAVA_FALSE;
java_lang_System_gc__(getThreadLocalData());
while(threadStateData->threadBlockedByGC) {
usleep((JAVA_INT)(1000));
}
invokedGC = NO;
threadStateData->threadActive = JAVA_TRUE;
return codenameOneGcMalloc(threadStateData, size, parent);
}
memset(o, 0, size);
o->__codenameOneParentClsReference = parent;
o->__codenameOneGcMark = -1;
o->__ownerThread = threadStateData;
o->__heapPosition = -1;
o->__codenameOneReferenceCount = 1;
#ifdef DEBUG_GC_ALLOCATIONS
o->className = threadStateData->callStackClass[threadStateData->callStackOffset - 1];
o->line = threadStateData->callStackLine[threadStateData->callStackOffset - 1];
#endif
if(threadStateData->heapAllocationSize == threadStateData->threadHeapTotalSize) {
if(threadStateData->threadBlockedByGC && !threadStateData->nativeAllocationMode) {
threadStateData->threadActive = JAVA_FALSE;
while(threadStateData->threadBlockedByGC) {
usleep(1000);
}
threadStateData->threadActive = JAVA_TRUE;
}
long maxHeapSize = CN1_MAX_HEAP_SIZE;
if (isEdt(threadStateData->threadId) && !lowMemoryMode) {
maxHeapSize = CN1_MAX_HEAP_SIZE_EDT;
}