| question_id | answer_id | title | question | answer |
|---|---|---|---|---|
73,719,092
| 73,719,154
|
How to reopen the winapi window?
|
I have a WinAPI application with a menu. I click "Graphics" and choose open or draw (it doesn't matter which). Then I close the child window. When I try to open it again, it doesn't work. Maybe I should put ShowWindow(hWnd, SW_HIDE) somewhere, but I don't understand where it should go. Maybe there is another solution.
main window
Here I'll put my code:
Callbacks, which I use
LRESULT CALLBACK DrawProcedure(HWND hWnd, UINT msg, WPARAM wp, LPARAM lp)
{
switch (msg)
{
case WM_PAINT:
{
...
}
case WM_LBUTTONDOWN:
{
...
}
}
return DefWindowProc(hWnd, msg, wp, lp);
}
LRESULT CALLBACK GraphProcedure(HWND hWnd, UINT msg, WPARAM wp, LPARAM lp)
{
switch (msg)
{
case WM_PAINT:
{
...
}
break;
case WM_CREATE:
...
case WM_SIZE:
...
}
return DefWindowProc(hWnd, msg, wp, lp);
}
LRESULT CALLBACK SoftwareMainProcedure(HWND hWnd, UINT msg, WPARAM wp, LPARAM lp)
{
switch (msg)
{
case WM_COMMAND:
switch (wp)
{
case draw_plot:
GraphClass.style = CS_HREDRAW | CS_VREDRAW;
GraphClass.lpfnWndProc = DrawProcedure;
GraphClass.hInstance = hInst;
GraphClass.lpszMenuName = NULL;
GraphClass.lpszClassName = L"graphics";
if (!RegisterClassW(&GraphClass))
{
return -1;
}
gr_draw = CreateWindow(L"graphics", L"DRAW", WS_VISIBLE | WS_BORDER | WS_MAXIMIZE | WS_HSCROLL | WS_VSCROLL | WS_OVERLAPPEDWINDOW, 0, 0, 800, 700, NULL, NULL, hInst, NULL);
break;
case open_plot:
GraphClass.style = CS_HREDRAW | CS_VREDRAW;
GraphClass.lpfnWndProc = GraphProcedure;
GraphClass.hInstance = hInst;
GraphClass.lpszMenuName = NULL;
GraphClass.lpszClassName = L"graphics";
if (!RegisterClassW(&GraphClass))
{
return -1;
}
gr_open = CreateWindow(L"graphics", L"OPEN", WS_VISIBLE | WS_BORDER | WS_MAXIMIZE | WS_OVERLAPPEDWINDOW, 0, 0, 800, 700, NULL, NULL, hInst, NULL);
break;
case OnExitSoftware:
PostQuitMessage(0);
break;
default:
break;
}
break;
case WM_SIZE:
{
...
}
break;
case WM_CREATE:
MainWndAddMenus(hWnd);
MainWndAddWidgets(hWnd);
break;
case WM_DESTROY: // close mainwindow
ExitSoftware();
break;
default:
return DefWindowProc(hWnd, msg, wp, lp);
}
}
|
RegisterClassW(&GraphClass) doesn't work the second time, because the window class is already registered, because you already registered it the first time.
It returns FALSE to tell you that it didn't work, and then your code doesn't open the window. To reiterate: you told the computer that if RegisterClassW(&GraphClass) doesn't work, it shouldn't open the window.
Solution: Either register the window class the first time you use it (not every time), or register all the window classes when the program starts.
Side question to think about: Why did you tell the computer to do nothing if RegisterClassW(&GraphClass) doesn't work? If you told it to pop up a message box saying "RegisterClassW(&GraphClass) didn't work", you'd know where the problem was, because the message box would tell you.
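As an illustration, here is a minimal sketch of the "register once" idea using the names from the question; the guard variable and the message box are additions for illustration, and note that both menu items currently register the same class name "graphics" with different window procedures, so they would also need distinct class names (or one shared procedure):
case draw_plot:
{
    static bool graphicsClassRegistered = false; // register the class only once
    if (!graphicsClassRegistered)
    {
        GraphClass.style = CS_HREDRAW | CS_VREDRAW;
        GraphClass.lpfnWndProc = DrawProcedure;
        GraphClass.hInstance = hInst;
        GraphClass.lpszMenuName = NULL;
        GraphClass.lpszClassName = L"graphics";
        if (!RegisterClassW(&GraphClass))
        {
            // now a failure is visible instead of silently doing nothing
            MessageBoxW(hWnd, L"RegisterClassW(&GraphClass) didn't work", L"Error", MB_OK);
            break;
        }
        graphicsClassRegistered = true;
    }
    gr_draw = CreateWindow(L"graphics", L"DRAW", WS_VISIBLE | WS_BORDER | WS_MAXIMIZE | WS_HSCROLL | WS_VSCROLL | WS_OVERLAPPEDWINDOW, 0, 0, 800, 700, NULL, NULL, hInst, NULL);
    break;
}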
|
73,719,101
| 73,720,808
|
Connecting a C++ program to a Python script with shared memory
|
I'm trying to connect a C++ program to Python using shared memory, but I don't know how to pass the name of the memory segment to Python.
Here is my C++ code:
key_t key = ftok("address", 1);
int shm_o;
char* msg = "hello there";
int len = strlen(msg) + 1;
void* addr;
shm_o = shmget(key, 20, IPC_CREAT | 0600);
if(shm_o == -1)
{
std::cout << "Failed: shmget.\n";
return 1;
}
addr = shmat(shm_o, NULL, 0);
if(addr == (void*) -1)
{
std::cout << "Failed: shmat.\n";
return 1;
}
std::cout << "Shared memory segment created successfully with id: " << shm_o;
memcpy(addr, msg, len);
getchar();
return 0;
I'm trying to get python to read from the shared memory segment like so:
shm_a = shared_memory.SharedMemory(name="address", create=False, size=20)
print(bytes(shm_a.buf[:11]))
but it throws an exception saying there is no file or directory called 'address'.
Am I going about this correctly or is there another way to attach python to the shared memory segment?
Any help would be much appreciated.
|
Taking the liberty to post a working example here for POSIX shared memory segments, which will work across C/C++ and Python on Linux/UNIX-like systems. This will not work on Windows. Note that your C++ code uses System V shared memory (shmget/shmat), while Python's multiprocessing.shared_memory opens POSIX shared memory objects by name, which is why Python cannot find anything called 'address'.
C++ code to create and write data into a shared memory segment (name provided on command line):
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <iostream>
#include <string>
int main(int argc, char * argv[])
{
if (argc != 2) {
std::cerr << "Argument <shmem_name> required" << std::endl;
return 1;
}
const char * shmem_name = argv[1];
size_t shm_size = 4096;
int shmem_fd = shm_open(shmem_name, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP);
if (shmem_fd == -1) {
perror("shm_open");
return 1;
}
std::cout << "Shared Memory segment created with fd " << shmem_fd << std::endl;
if (ftruncate(shmem_fd, shm_size) == -1) {
perror("ftruncate");
return 1;
}
std::cout << "Shared Memory segment resized to " << shm_size << std::endl;
void * addr = mmap(0, shm_size, PROT_WRITE, MAP_SHARED, shmem_fd, 0);
if (addr == MAP_FAILED) {
perror("mmap");
return 1;
}
std::cout << "Please enter some text to write to shared memory segment\n";
std::string text;
std::getline(std::cin, text);
while (! text.empty()) {
strncpy((char *)addr, text.data(), shm_size);
std::cout << "Written '" << text << "' to shared memory segment\n";
std::getline(std::cin, text);
}
std::cout << "Unlinking shared memory segment." << std::endl;
shm_unlink(shmem_name) ;
}
Python code to read any string from the beginning of the shared memory segment:
import sys
from multiprocessing import shared_memory, resource_tracker
if len(sys.argv) != 2:
print("Argument <shmem_name> required")
sys.exit(1)
shm_seg = shared_memory.SharedMemory(name=sys.argv[1])
print(bytes(shm_seg.buf).strip(b'\x00').decode('ascii'))
shm_seg.close()
# Manually remove segment from resource_tracker, otherwise shmem segment
# will be unlinked upon program exit
resource_tracker.unregister(shm_seg._name, "shared_memory")
|
73,719,982
| 73,720,304
|
Helper function to construct 2D arrays
|
Am I breaking C++ coding conventions writing a helper function which allocates a 2D array outside main()? Because my application calls for many N-dimensional arrays I want to ensure the same process is followed. A prototype which demonstrates what I am doing :
#include <iostream>
// my helper function which allocates the memory for a 2D int array, then returns its pointer.
// the final version will be templated so I can return arrays of any primitive type.
int** make2DArray(int dim1, int dim2)
{
int** out = new int* [dim1];
for (int i = 0; i < dim2; i++) { out[i] = new int[dim2];}
return out;
}
//helper function to deallocate the 2D array.
void destroy2DArray(int** name, int dim1, int dim2)
{
for (int i = 0; i < dim2; i++) { delete[] name[i]; }
delete[] name;
return;
}
int main()
{
int** test = make2DArray(2,2); //makes a 2x2 array and stores its pointer in test.
//set the values to show setting works
test[0][0] = 5;
test[0][1] = 2;
test[1][0] = 1;
test[1][1] = -5;
// print the array values to show accessing works
printf("array test is test[0][0] = %d, test[0][1] = %d, test[1][0] = %d, test[1][1] = %d",
test[0][0],test[0][1],test[1][0],test[1][1]);
//deallocate the memory held by test
destroy2DArray(test,2,2);
return 0;
}
My concern is this may not be memory-safe, since it appears I am allocating memory outside of the function in which it is used (potential out-of-scope error). I can read and write to the array when I am making a single small array, but am worried when I scale this up and there are many operations going on the code might access and alter these values.
I may be able to sidestep these issues by making an array class which includes these functions as members, but I am curious about this as an edge case of C++ style and scoping.
|
There is a difference between allocating 2D arrays like this and what you get when you declare a local variable like int ary[10][10], which, based on your statement
My concern is that this operation may not be memory-safe, since it
appears that I am allocating memory for an array outside of the
function in which it is used (potential out-of-scope error)
I am guessing you do not fully understand.
You are allocating arrays on the heap. Declaring a local variable like int ary[10][10] places it on the stack. It is the latter case where you need to worry about not referencing that memory outside of its scope-based lifetime; that is, it is the following that is totally wrong:
//DON'T DO THIS.
template<size_t M, size_t N>
int* make2DArray( ) {
int ary[M][N];
return reinterpret_cast<int*>(ary);
}
int main()
{
auto foo = make2DArray<10, 10>();
}
because ary is local to the function and when the stack frame created by the call to make2DArray<10,10> goes away the pointer the function returns will be dangling.
Heap allocation is a different story. It outlives the scope in which it was created. It lasts until it is deleted.
But anyway, as others have said in comments, your code looks like C, not C++. Prefer a std::vector<std::vector<int>> rather than rolling your own.
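For example, a rough vector-based equivalent of the helper (a sketch of the suggestion above, not a drop-in replacement for the posted API):
#include <vector>
std::vector<std::vector<int>> make2DArray(int dim1, int dim2)
{
    // dim1 rows, each holding dim2 zero-initialized ints; memory is released automatically
    return std::vector<std::vector<int>>(dim1, std::vector<int>(dim2));
}
int main()
{
    auto test = make2DArray(2, 2);
    test[0][0] = 5; // element access works the same way as with the raw pointers
    // no destroy2DArray needed: the vectors clean up when test goes out of scope
}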
|
73,720,038
| 74,635,915
|
Why would SECBUFFER_EXTRA point to the inside of SECBUFFER_STREAM_TRAILER after calling DecryptMessage?
|
We have a client application on a Windows 7 SP1 VM with the appropriate hotfixes and registry settings to enable TLS 1.2 communication. We have a server application on a Windows Server 2019 Datacenter VM. The client and server establish a TLS 1.2 session (according to QueryContextAttributes) and the negotiated stream trailer size is 48 bytes (again, according to QueryContextAttributes).
When either the client or server calls DecryptMessage with four buffers (one SECBUFFER_DATA and three SECBUFFER_EMPTY), the output buffers are of type SECBUFFER_STREAM_HEADER, SECBUFFER_DATA, SECBUFFER_STREAM_TRAILER, and SECBUFFER_EXTRA.
In spite of the negotiated stream trailer size being a specific number of bytes (e.g. 48 as above), the SECBUFFER_STREAM_TRAILER seems to always be smaller by a few bytes and the SECBUFFER_EXTRA points to the first byte after the real end of the stream trailer.
For example, if the negotiated stream trailer size was 48 bytes and we decrypted a message packet with a SECBUFFER_STREAM_TRAILER buffer with a cbBuffer of 45 bytes, then the SECBUFFER_EXTRA buffer would be present and point to the stream trailer's buffer + 45 and would have a cbBuffer of 3.
This seems extremely odd to me. In an extremely technical sense, it is valid (the extra buffer is simply used to point to data which was not consumed by the previous call to DecryptMessage, and those 3 bytes in the example above were not). We've resolved the issue by calculating whether the extra buffer is within the negotiated stream trailer block and shifting the extra buffer pointer past those bytes if it is, but it seems strange that the security provider would even report those bytes as "extra data" or that a message packet would use fewer bytes for its stream header and/or stream trailer.
Notably, this behavior does not seem to be present when both the client and server are on more modern operating systems than Windows 7 (tested on Windows Server 2012 R2 Standard, Windows Server 2019 Datacenter, and Windows 10).
|
The negotiated stream sizes represent the maximum possible value of the header and trailer. The actual values may be less. When the client was sending TLS application data messages, Schannel would sometimes construct a trailer that was smaller than the negotiated size but our application would always send the [maximum header size] + [application data size] + [maximum trailer size] bytes which would result in the server receiving extra null bytes. Schannel's DecryptMessage would then report those extra bytes as extra data but from our perspective that data was "inside" the trailer.
As mentioned previously, we resolved the issue by calculating whether the extra buffer was inside what we thought was the trailer and would move the extra data pointer to point to just after those null bytes (if there was any data after those null bytes) which made everything else work.
However, properly implemented clients would fail to communicate with the server over TLS 1.0 and TLS 1.1 sessions. This led me to fix the bug in our encryption logic which would result in our applications sending extra null bytes and the bug in our decryption logic which would result in applications invalidly skipping over those (bad) null bytes.
|
73,720,206
| 73,720,263
|
Why does underflow with floating points happen at 2⁻¹²⁶?
|
When representing a float, why does the exponent face underflow when it hits 2⁻¹²⁶ if 8 bits can hold everything from −127 (incl.) to 128 (incl.)?
|
Exponents range from −126 to +127 because the exponents −127 (all 0s) and +128 (all 1s) are reserved for special numbers: the all-zeros pattern encodes zero and subnormal numbers, and the all-ones pattern encodes infinities and NaN (see Wikipedia).
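A quick way to see the 2⁻¹²⁶ boundary from code (a small sketch, not part of the original answer):
#include <iostream>
#include <limits>
int main()
{
    // smallest positive normal float: 2^-126, roughly 1.17549e-38
    std::cout << std::numeric_limits<float>::min() << "\n";
    // smallest positive subnormal float: 2^-149, roughly 1.4013e-45
    std::cout << std::numeric_limits<float>::denorm_min() << "\n";
}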
|
73,720,552
| 73,814,304
|
CPPZMQ - Publish and subscribe with standard vector
|
From the documentation at https://brettviren.github.io/cppzmq-tour/index.html#intro, it seems that it is possible with CPPZMQ to send and receive a standard vector by using messages or buffers. However, I have not been able to use the vector from the subscriber; I get an error when trying to access it:
Segmentation error (core dumped)
when I run the following code:
Publisher:
#include <vector>
#include <iostream>
#include <zmq.hpp>
#include <thread>
using namespace std;
using namespace zmq;
int main()
{
vector<float> v(2, 0.0);
context_t ctx;
socket_t pub(ctx, ZMQ_PUB);
const std::string addr = "tcp://127.0.0.1:5678";
pub.bind(addr);
while (true)
{
std::this_thread::sleep_for(std::chrono::milliseconds(100));
v = {0.1, 0.2};
message_t msg(v);
auto res = pub.send(msg, send_flags::none);
cout << "message sent" << endl;
}
}
Subscriber:
#include <vector>
#include <iostream>
#include <zmq.hpp>
using namespace std;
using namespace zmq;
int main()
{
context_t ctx;
socket_t sub(ctx, socket_type::sub);
const std::string addr = "tcp://127.0.0.1:5678";
sub.set(zmq::sockopt::subscribe, "");
sub.connect(addr);
message_t msg;
const vector<float>* iptr = msg.data<vector<float>>();
while (true)
{
if (sub.recv(msg, zmq::recv_flags::none))
{
cout << "msg received" << endl;
iptr = msg.data<vector<float>>();
cout << "iptr: " << iptr << endl;
cout << "element 0: " << (*iptr)[0] << "endl";
}
}
}
My question is:
How do I retrieve the vector in the subscriber? More generally, with a vector of constant length and type, I need an efficient way to send and receive such a vector, for example avoiding copies and avoiding reallocation and destruction at every message. What is the recommended way to do that?
|
Constructing zmq::message_t directly from an STL vector is fine, because the iterator-based constructor will be called.
std::vector<float> v({0.1, 0.2});
message_t msg(v);
It will internally copy the content of the vector to the underlying zmq_msg_data, cast to float*:
std::copy(first, last, data<value_t>()); // value_t == float in this case
However, the templated version of the message_t::data method
template<typename T> T *data() ZMQ_NOTHROW { return static_cast<T *>(data()); }
is just a static cast from void* to your T*. So if you invoke it as msg.data<vector<float>>(), you try to cast the raw buffer to the complex std::vector class, which is incorrect. Instead, you can copy the received raw data into a newly created float vector:
std::vector<float> vec;
vec.resize(msg.size()/sizeof(float)); // note that msg size must be divisible by float size
std::memcpy(vec.data(), msg.data(), msg.size());
Alternatively, you can get the already-cast data with float* rawData = msg.data<float>(); and use
std::copy(rawData, rawData+msg.size()/sizeof(float), std::back_inserter(vec));
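To address the "avoid reallocation at every message" part of the question, a sketch along the same lines (assuming, as stated, that the vector length is fixed and known; needs <cstring> for std::memcpy):
std::vector<float> vec(2);   // sized once, outside the receive loop
zmq::message_t msg;          // reused for every message
while (true)
{
    if (sub.recv(msg, zmq::recv_flags::none) && msg.size() == vec.size() * sizeof(float))
    {
        std::memcpy(vec.data(), msg.data(), msg.size()); // copy the payload, no reallocation
        cout << "element 0: " << vec[0] << endl;
    }
}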
|
73,720,619
| 73,721,052
|
This code that is supposed to get the largest and average of input numbers is not giving me the desired output, what did I get wrong?
|
I've been trying to get this code to give me an output of "20 9.50" when I input "15 20 0 3 -1" but it keeps giving me the output "20 0.55". This is the code I've made:
#include <iostream>
#include <iomanip>
using namespace std;
int main() {
int largest = 0;
int number = 0;
int count = 0;
double avg = 0;
while (number >= 0) {
cin >> number;
if (number >= 0) {
if (number > largest) {
largest = number;
}
avg += number; ++count;
}
if (count > 0) {
avg /= count;
}
}
cout << largest << " " << fixed << setprecision(2) << avg << endl;
return 0;
}
Can anyone point out how I'm getting the wrong output? I'm in a class for C++ but I'm still new to it. Hopefully I'm not treated too harshly, any help is appreciated.
|
I am not sure if you are supposed to maintain the average "on the fly".
The better, less error-prone approach is to maintain a running total for the life of the loop. Then, after the while loop, compute the average by dividing the total by the count.
FYI, for everyone else: the -1 ends the while loop, and total and count should not include the -1 that terminates the input.
The following code gives output of 20 9.50 with input of 15 20 0 3 -1.
#include <iostream>
#include <iomanip>
using namespace std;
int main() {
int largest = 0;
int number = 0;
int count = 0;
double avg = 0;
double total = 0;
while (number >= 0) {
cin >> number;
if (number >= 0) {
if (number > largest) {
largest = number;
}
//avg += number; ++count;
total += number; ++count;
}
//if (count > 0) {
// avg /= count;
}
avg = total / count;
cout << largest << " " << fixed << setprecision(2) << avg << endl;
return 0;
}
|
73,721,288
| 73,721,442
|
How to execute a slot or a function with two signals in QT?
|
I'm working on a Qt project. I was wondering if it is possible to create a connection that uses two signals to execute a method.
I have three classes: A, B and C. Class A emits a signal when a button is pressed (connected in Class C); also in Class C a QProcess is created (from an instance of Class B).
In class C I have a connect to get the output of the QProcess. Also, I have another connect to execute a method when a button is pressed (signal emitted in Class A).
So I need to modify the current behavior: the doSomething() method should be executed when the QProcess output is ready, similar to an if statement:
if(signalA && signal B){
do something...
}
This is the current code:
//This signal will get the output of a Qprocess
connect(objectC,&QProcess::readyReadStandardOutput, [=] (){
QString out = objectB->readAllStandardOutput();
qDebug() << out;
});
//When a button in Class A is pressed doSomething() is executed
//but now I must wait until the above signal generates an output.
connect(objectA, ClassA::buttonPressed, [=] (){
doSomething();
});
objectC -> run(args); // This line execute an external process
Any ideas on how to achieve this?
I found on stackoverflow that in QT we can have two signals in a single connect, like this:
connect(this,SIGNAL(someSignal()),this,SIGNAL(anotherSignal()));
but I don't know how to adapt this to my problem.
The flow of my Qt application is the following:
A QProcess is created to launch an external .exe file
The QProcess returns a QString
At any moment the user can press a button that will execute a specific method (doSomething()), which needs the output of the QProcess.
The big issue is that sometimes the user presses the button before the QProcess ends, so I cannot execute doSomething() correctly.
The desired behavior is: if the user presses the button to execute doSomething(), I must first wait until the output of the QProcess is ready. So if I press the button and the QProcess takes e.g. 10 s to finish, doSomething() should be executed after those 10 s.
|
You can store the state of both events and check them both whenever one of them changes.
// These should probably be defined in the header of your class.
bool processFinished = false;
bool buttonClicked = false;
void checkState() {
if (buttonClicked && processFinished) {
doSomething();
}
}
connect(objectC,&QProcess::readyReadStandardOutput, [=] (){
...
processFinished = true;
checkState();
});
connect(objectA, ClassA::buttonPressed, [=] (){
...
buttonClicked = true;
checkState();
});
|
73,721,865
| 73,722,343
|
c++ async multiple tasks implementation
|
I have used the tasks in c# in this way:
static async Task<string> DoTaskAsync(string name, int timeout)
{
var start = DateTime.Now;
Console.WriteLine("Enter {0}, {1}", name, timeout);
await Task.Delay(timeout);
Console.WriteLine("Exit {0}, {1}", name, (DateTime.Now - start).TotalMilliseconds);
return name;
}
public Task DoSomethingAsync()
{
var t1 = DoTaskAsync("t2.1", 3000);
var t2 = DoTaskAsync("t2.2", 2000);
var t3 = DoTaskAsync("t2.3", 1000);
return Task.WhenAll(t1, t2, t3);
}
but I need to do the same in C++.
Is there a way to migrate this code to C++?
Thanks!
|
A more or less direct translation would probably use C++ std::futures returned by std::async.
Disclaimer: I don't have much C# knowledge and just read about its Tasks a bit just now. I think this is what you are going for.
In C++ we use std::chrono::durations instead of plain ints. The resolution of the clocks is often much higher than milliseconds, so I selected to go for whatever duration the std::chrono::steady_clock supports. You can thereby pass it a std::chrono::milliseconds or std::chrono::seconds - and it will do the right thing.
#include <array>
#include <chrono> // clocks and durations
#include <format> // std::format
#include <future> // std::async
#include <iostream>
#include <string>
#include <thread> // std::this_thread::sleep_for
auto DoTaskAsync(std::string name, std::chrono::steady_clock::duration timeout)
{
return std::async(std::launch::async,
[name=std::move(name),timeout] {
auto start = std::chrono::steady_clock::now();
std::cout << std::format("Enter {0}, {1}\n", name, timeout);
std::this_thread::sleep_for(timeout);
std::cout << std::format("Exit {0}, {1}\n", name,
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - start));
return name;
});
}
std::array<std::string, 3> DoSomethingAsync() {
using namespace std::chrono_literals;
auto t1 = DoTaskAsync("t2.1", 3000000000ns); // nanoseconds
auto t2 = DoTaskAsync("t2.2", 2000ms); // milliseconds
auto t3 = DoTaskAsync("t2.3", 1s); // seconds
return {t1.get(), t2.get(), t3.get()};
}
Demo
|
73,721,970
| 73,722,549
|
How to construct a zip file with libzip
|
I am trying to create a compressed file and insert an XML file into it using two libraries (pugixml / libzip). Everything runs without error, but when I open the XML file, the encoding at the beginning of the file is weird:
Main.cpp :
#include <iostream>
#include <sstream>
#include <zip.h>
#include <pugixml.hpp>
#include <memory>
using namespace std;
int main()
{
auto document = std::unique_ptr<pugi::xml_document>(new pugi::xml_document);
pugi::xml_node declNode = document->prepend_child(pugi::node_declaration);
declNode.append_attribute("version") = "1.0";
declNode.append_attribute("encoding") = "UTF-8";
declNode.append_attribute("standalone") = "yes";
pugi::xml_node rootNode = document->append_child("Document");
rootNode.append_child("Files");
int err = 0;
zip_t* zip = zip_open("test.zip", ZIP_CREATE, &err);
{
{
std::stringstream ss;
document->save(ss, " ");
std::string buffer = ss.str();
auto src = zip_source_buffer_create(buffer.c_str(),buffer.length(),0,0);
zip_file_add(zip,"Document.xml",src,ZIP_FL_ENC_UTF_8);
}
}
zip_close(zip);
return 0;
}
Document.xml :
~U Ä U rsion="1.0" encoding="UTF-8" standalone="yes"?>
<Document>
<Files />
</Document>
Hex dump (screenshot not included):
|
The posted program has undefined behaviour due to reading already freed memory.
In the example you posted the zip_source_t gets created with freep = 0, so you need to make sure that the provided buffer remains valid for the entire lifetime of the zip_source_t object:
zip_source_buffer
zip_source_t * zip_source_buffer_create(const void *data, zip_uint64_t len, int freep, zip_error_t *error);
The functions zip_source_buffer() and zip_source_buffer_create() create a zip source from the buffer data of size len. If freep is non-zero, the buffer will be freed when it is no longer needed.
data must remain valid for the lifetime of the created source.
The source can be used to open a zip archive from.
zip_file_add() (if successful) will take ownership of the source you give it, but note that it is not required to free the source immediately - it could for example store it within the zip_t.
As it is currently implemented zip_file_add() does not free the zip_source_t - it instead hangs onto it and writes it out once you call zip_close(). So your buffer needs to remain valid for the entire remaining lifetime of the zip_t object - in this case this is until the call to zip_close().
If you rewrite your example to keep the std::string buffer; alive until after you've closed the zip_t the resulting file should be correct:
zip_t* zip = zip_open("test.zip", ZIP_CREATE, &err);
std::stringstream ss;
document->save(ss, " ");
std::string buffer = ss.str();
auto src = zip_source_buffer_create(buffer.c_str(),buffer.length(),0,0);
zip_file_add(zip,"Document.xml",src,ZIP_FL_ENC_UTF_8);
zip_close(zip); // lifetime of zip_t ends here (which will also end the lifetime of the zip_source_t)
// lifetime of buffer ends at end of scope
Recommendation
There is no way to know how long the zip_source_t will be alive (it's reference-counted), and so it's not easy to know for how long exactly you need to keep your buffer alive.
So I would recommend allocating the memory for the zip_source_t separately with malloc() and passing freep=1 to zip_source_buffer_create().
That way the buffer will remain valid as long as the zip_source_t is alive.
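A sketch of that recommendation applied to the posted code (the malloc/memcpy lines are additions, error handling is omitted, and malloc/memcpy need <cstdlib>/<cstring>):
std::string buffer = ss.str();
// copy the XML into a malloc'd block and let libzip free it when it no longer needs it
void* data = malloc(buffer.length());
memcpy(data, buffer.data(), buffer.length());
auto src = zip_source_buffer_create(data, buffer.length(), 1 /* freep */, 0);
zip_file_add(zip, "Document.xml", src, ZIP_FL_ENC_UTF_8);
zip_close(zip); // the buffer's lifetime no longer matters: libzip owns the malloc'd copy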
|
73,722,250
| 73,723,722
|
Qualified names for Julia's `Base` functions/types
|
Some C++ developers strongly suggest never using using namespace std, as they prefer to explicitly include the namespace of every function in their code. Reasons for this include clarity when reading code as well as preventing definitions with the same identifier from shadowing one another. According to Julia's documentation, all modules and types already contain using Base (and other) statements, so fully qualified names are in practice not necessary. (There is a way to override this default behavior, though, as explained in the documentation.) Is there a style consensus among Julia programmers on whether to use fully qualified names for Base functions and types when writing modules? That is, for instance, writing Base.println(some_string) vs println(some_string).
|
Is there a style consensus among Julia programmers whether to use fully qualified names for Base functions and types when writing modules?
The consensus is not to use fully qualified names.
Does Julia Base have that same issue?
No. Package developers are aware of the names in Base and take care not to overshadow them.
Edit
See e.g. Blue Style guide.
There are many more style rules in the guide, but the relevant one is:
Prefer the use of using over import to ensure that extension of a function is always explicit and on purpose:
# Yes:
using Example
Example.hello(x::Monster) = "Aargh! It's a Monster!"
Base.isreal(x::Ghost) = false
# No:
import Base: isreal
import Example: hello
hello(x::Monster) = "Aargh! It's a Monster!"
isreal(x::Ghost) = false
This is a recommendation for package developers to extend Base Julia functions rather than overshadow them (which in turn means that you can safely use using as a user).
|
73,722,302
| 73,723,285
|
Determine if a generic type is a primitive or enum with underlying primitive at compile time C++ in a function
|
I'm trying to determine at compile time within a template function if the type T is either a primitive type or an enum with underlying primitive type.
In code, I'm trying to do this
template <typename T>
bool foo(T& input)
{
bool isPrimitive = std::is_fundamental<T>::value || (std::is_enum<T>::value && std::is_fundamental<std::underlying_type<T>::type>::value);
// We want to do things using isPrimitive, but not important so omitted.
return isPrimitive; //return this value just to avoid warnings.
}
This fails to compile when foo is invoked with T not being an enum, since std::underlying_type<T>::type doesn't exist in that case.
enum class Bar : int
{
DUMMYCASE,
};
int main()
{
int test1;
Bar test2;
foo(test1); // fails
foo(test2); // ok
}
I've looked at std::conditional and std::enable_if, as well as this answer SFINAE not happening with std::underlying_type but cannot see how this can be done. It seems like enable_if only works in template params.
How can I rewrite the line such that it compiles when T is not an enum? Ideally I would like to avoid changing the function signature.
bool isPrimitive = std::is_fundamental<T>::value || (std::is_enum<T>::value && std::is_fundamental<std::underlying_type<T>::type>::value);
|
Besides the fact that enums support only integral types, you could write:
template <typename T>
bool foo(T& input)
{
constexpr bool isPrimitive = [](){
if constexpr (std::is_fundamental_v<T>){
return true;
}
if constexpr (std::is_enum_v<T>){
using underlying_type = typename std::underlying_type<T>::type;
if constexpr (std::is_fundamental_v<underlying_type>){
return true;
}
}
return false;
}();
// We want to do things using isPrimitive, but not important so omitted.
return isPrimitive; //return this value just to avoid warnings.
}
The problem with your code is that std::underlying_type is instantiated for non-enums, too.
In addition, there is a missing typename disambiguator.
std::underlying_type<T>::type is a dependent name and must be prefixed with "typename" - otherwise the compiler assumes a non-type.
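Since, as noted above, an enum's underlying type is always an integral (and hence fundamental) type, the whole check can also be collapsed to this shorter form, if that matches the intended semantics:
template <typename T>
bool foo(T& input)
{
    constexpr bool isPrimitive = std::is_fundamental_v<T> || std::is_enum_v<T>;
    return isPrimitive;
}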
|
73,722,798
| 73,771,325
|
ImGUI Popup not showing up but executing the code
|
I am making a program using ImGui and I want to display a popup if the input in one window is bad after clicking the "OK" button. It enters the if statement and executes the code, but the popup doesn't show up.
ImGui::OpenPopup("Error Creating Image");
// Always center this window when appearing
ImVec2 center = ImGui::GetMainViewport()->GetCenter();
ImGui::SetNextWindowPos(center, ImGuiCond_Appearing, ImVec2(0.5f, 0.5f));
if (ImGui::BeginPopupModal("Error Creating Image", NULL, ImGuiWindowFlags_AlwaysAutoResize)) {
ImGui::SetItemDefaultFocus();
ImGui::Text("The size of the Image must be greater than 0. Also it need to have a name!\n\n");
ImGui::Separator();
if (ImGui::Button("OK")) {
ImGui::CloseCurrentPopup();
}
ImGui::EndPopup();
}
|
Does the entire code you are showing only run once, when the error occurred?
The ImGui::BeginPopupModal call and the associated if block have to run every frame, otherwise the popup won't get drawn.
Something like this:
void foo() { // 'foo' runs every frame.
if (ImGui::Button("Show popup"))
ImGui::OpenPopup("ThePopup");
// Maybe some other stuff here.
if (ImGui::BeginPopupModal("ThePopup")) {
// Draw popup contents.
ImGui::EndPopup();
}
}
The code for drawing the popup can be moved anywhere, as long as it's on the same level of the ID stack.
|
73,723,010
| 73,723,063
|
Assigning a class variable in class definition versus at class instantiation
|
What are the ramifications of assigning a class variable when defining the class versus in the class constructor? Is the variable assigned in the class definition accessible by all class instances?
Example of assignment at instantiation:
class Foo
{
private:
int x;
double y;
public:
Foo()
{
x = 0;
y = 1.;
}
};
Example of assignment in class definition:
class Foo
{
private:
int x = 0;
double y = 1.;
public:
Foo();
};
edit:
As to the class member being accessible by all instances, I think I was looking for the notion of a static declaration, I guess I'm just new to the curly brace languages.
|
In this code snippet
int x = 0;
double y = 1.;
there are no assignments. There are initializations.
In this code snippet
Foo()
{
x = 0;
y = 1.;
}
the assignment operator is indeed used.
In general, for objects of complex types this can be 1) impossible (when either the default constructor or the assignment operator is not available) or 2) wasteful, because default constructors are called first to create the objects and only afterwards are the assignment operators called.
It is desirable to define constructors like this, for example
Foo() : x( 0 ), y( 1 )
{
}
using mem-initializer lists.
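Regarding the edit about a member "accessible by all instances": that is a static data member, which is shared by the whole class rather than stored per object. A minimal sketch (the member names are made up for illustration):
class Foo
{
public:
    static int shared;  // one variable shared by every Foo instance
    int x = 0;          // per-instance, initialized with a default member initializer
};
int Foo::shared = 0;    // defined once, in a single .cpp file (or use inline static in C++17)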
|
73,723,151
| 73,723,663
|
How to achieve encapsulation in C++ project
|
I'm currently learning about OOP design patterns and I'm working on a project whose main class is roughly organized as follows:
class MainClass {
public:
MainClass(int something, CrazyTypeOfAlgorithm algoType);
double getResult();
private:
std::vector<double> _numbers;
CrazyTypeOfAlgorithm _algoType;
};
where CrazyTypeOfAlgorithm is an enum. Basically, depending on the specific algorithm used, the getResult() function acts accordingly. So far, I've been using simple switch statements in the implementation of the latter. As this class will grow a lot when further algorithms are introduced, I want to encapsulate the algorithm in its own class somehow. I tried out implementing the Strategy Pattern but ended up with the problem that the classes implementing different algorithms need to be friend classes of MainClass - as they need to access private member variables of MainClass (e.g. _numbers). How would I accomplish proper and clean encapsulation in this situation?
|
A common approach would be to make Algorithm a pure virtual (interface) class with various implementations (like AlgorithmSimpleSum below), a bit like this:
// Pure Virtual Interface
class Algorithm
{
public:
virtual ~Algorithm() = default;
virtual double process(double const* begin, double const* end) const = 0;
};
class MainClass {
public:
MainClass(std::vector<double> const& numbers, std::unique_ptr<Algorithm> algorithm)
: numbers(numbers), algorithm(std::move(algorithm)) {}
double getResult() { return algorithm->process(numbers.data(), numbers.data() + numbers.size()); }
private:
std::vector<double> numbers;
std::unique_ptr<Algorithm> algorithm;
};
// Concrete implementation of an Algorithm
class AlgorithmSimpleSum
: public Algorithm
{
public:
double process(double const* begin, double const* end) const override
{
double r = 0.0;
while(begin != end)
r += *begin++;
return r;
}
};
You can make the parameter to the Algorithm::process() function a std::vector<double> const&, but that is a bit less generic/flexible, although it has the advantage of being simpler.
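Usage would then look roughly like this (a sketch using the classes defined above; requires <memory> and <vector>):
std::vector<double> numbers{1.0, 2.0, 3.0};
MainClass m(numbers, std::make_unique<AlgorithmSimpleSum>());
double sum = m.getResult(); // 6.0, computed by the injected algorithm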
|
73,723,352
| 73,728,329
|
Drag and Drop Item list not working properly on ImGUI
|
I'm using ImGui and I want to implement a layer menu for the images, and to move them I'm using
Drag to reorder items in a vector.
Sometimes it works just fine, but other times the image just jumps from the current position to a random one.
for (int i = 0; i < this->Images->size(); i++) {
ImGui::Image((void*)(intptr_t)this->Images->at(i).texture, ImVec2(100 * temp_percentage, 100 * temp_percentage));
ImGui::SameLine();
ImGui::Selectable(this->Images->at(i).name.c_str());
if (ImGui::IsItemActive() && !ImGui::IsItemHovered())
{
int n_next = i + (ImGui::GetMouseDragDelta(0).y < 0.f ? -1 : 1);
if (n_next >= 0 && n_next < this->Images->size())
{
std::swap(this->Images->at(i), this->Images->at(n_next));
*this->CurrentImage = this->Images->front();
centerImage();
ImGui::ResetMouseDragDelta();
}
}
ImGui::Separator();
}
|
The problem lies in !ImGui::IsItemHovered(): there is a small spacing between the lines (cell, selectable, ...), so when the mouse hovers over that spacing, the item isn't hovered but is still active, and the code therefore executes the swap and resets the mouse delta multiple times, making the item go to the top or bottom of the list. This will also happen if the mouse goes out of the table/window bounds.
To make the problem more visible, you can make the spacing bigger using ImGui::GetStyle().ItemSpacing.y = 50.f;.
To actually fix the problem, you'll have to calculate the item index from the mouse position. Here is a way to do it; it is not perfect, but it works.
ImGuiStyle& style = ImGui::GetStyle();
ImVec2 windowPosition = ImGui::GetWindowPos();
ImVec2 cursorPosition = ImGui::GetCursorPos();
// this is not a pixel perfect position
// you can try to make it more accurate by adding some offset
ImVec2 itemPosition (
windowPosition.x + cursorPosition.x,
windowPosition.y + cursorPosition.y - style.ItemSpacing.y
);
for (int i = 0; i < this->Images->size(); i++) {
ImGui::Image((void*)(intptr_t)this->Images->at(i).texture, ImVec2(100 * temp_percentage, 100 * temp_percentage));
ImGui::SameLine();
ImGui::Selectable(this->Images->at(i).name.c_str());
if (ImGui::IsItemActive() && ImGui::IsMouseDragging(0))
{
int n_next = floorf((ImGui::GetMousePos().y - itemPosition.y) / itemHeight);
if (n_next != i && n_next >= 0 && n_next < this->Images->size())
{
std::swap(this->Images->at(i), this->Images->at(n_next));
*this->CurrentImage = this->Images->front();
centerImage();
}
}
ImGui::Separator();
}
There is also another problem in your code: if there are multiple items with the same name, ImGui::IsItemActive() will return true for all of them if one is active.
You can fix this easily by adding ##some_unique_string after the name; for example, ImGui::Selectable("Image##image_1") will just display Image.
|
73,723,354
| 73,850,966
|
How to compile with c++ <execution> standard library
|
The issue
I am trying to use the execution policies in the standard algorithm library. However, when I try to compile I get the following error message
c:\mingw\lib\gcc\mingw32\9.2.0\include\c++\pstl\parallel_backend_tbb.h:19:10: fatal error: tbb/blocked_range.h: No such file or directory
After looking at various other related questions such as this or this, I understand that the execution library depends upon a library called TBB. Moreover, in order to compile code which uses <execution>, one has to manually link against TBB. My issue is precisely with how to download and link TBB to a program that uses <execution>.
I believe I have some serious gaps in my understanding in terms of how one downloads the correct files and then links to them. I will first make a list with my understanding of the linking process and then I will explain what I have tried to fix the issue. I have chosen this format so that it is faster for whoever answers my question to point at the issue at fault. I will attempt to keep this as concise as possible.
My understanding
Code is organized in header and cpp files, where the former usually only contain the interface to the software and the later the implementation
The cpp files can be pre-compiled and grouped into a single library file
For a user to then use the library, they have to #include the header/s in their script and also tell the compiler where the header files as well as the library file, are located
This can be done with the -I for the headers and -L, -l for the library file
-L provides the location of the library files, the -l specifies which libraries to use
What I tried
The script I try to compile is:
#include <execution>
int main() {
std::execution::par;
return 0;
}
with
g++ script.cpp -o out -I C:(path to the headers) -L C:(path to the library) -l (name of library) -std=c++17
I should also mention I am trying to do this on Windows 10
1st attempt
I had a particularly hard time understanding where to find the header and library files for tbb.
In the Intel getting started with TBB webpage, this github repository is listed as "TBB being available at". As I am used to header-only libraries I thought everything would be in the include directory, but no .dll files were there. It is now my understanding that I have to compile the DLLs myself for my specific system, which makes sense. I followed this process using CMake:
# Do our experiments in /tmp
cd /tmp
# Clone oneTBB repository
git clone https://github.com/oneapi-src/oneTBB.git
cd oneTBB
# Create binary directory for out-of-source build
mkdir build && cd build
# Configure: customize CMAKE_INSTALL_PREFIX and disable TBB_TEST to avoid tests build
cmake -DCMAKE_INSTALL_PREFIX=/tmp/my_installed_onetbb -DTBB_TEST=OFF ..
# Build
cmake --build
# Install
cmake --install .
# Well done! Your installed oneTBB is in /tmp/my_installed_onetbb
However, at the cmake --build step, cmake does not accept the command but requests more options. One of them is the dir option, for which I made another directory and supplied it, but then the error message Error: could not load cache was printed.
In any case, some files had been created so I searched for the .dll file but could not find it.
2nd attempt
I downloaded the Intel oneAPI Base Toolkit as is suggested here. After the installation at ../Program Files (x86)/Intel/oneAPI I found the specific tbb tool at C:\Program Files (x86)\Intel\oneAPI\tbb and I used this path for the -I and -L flags, but the initial error message persists.
I also copied the directory C:\Program Files (x86)\Intel\oneAPI\tbb\2021.6.0 to the local directory of the script so I could link with -flag tbb\2021.6.0 but no luck
Many thanks
|
Yes, you were on the right track.
If you use the mingw-w64-tbb package, you can link with -ltbb12 instead of -ltbb, as that is the name of its library files.
To use the -ltbb option instead, you should set up the Intel oneAPI environment, which you get by downloading the Intel oneAPI Base Toolkit. You set up the environment by sourcing the setvars.sh file using the command below.
source /opt/intel/oneapi/setvars.sh
|
73,724,674
| 73,724,727
|
Avoid Overriding method from library
|
I want a class method to have the same name as the function from the C++ <cmath> library, but without overriding it with my own method. I know I could just change the name, but that is not what I want to do. Is this possible?
calculator.cpp:
#include <calculator.h>
#include <cmath>
int Calculator::pow(int entier, int puissance) {
return pow(entier, puissance);
}
calculator.h:
class Calculator {
public:
Calculator() {}
int pow (int a, int b);
};
I already know that the types I am using are wrong for this type of computation but that is not the point.
|
You are not overriding anything. Your pow function is in a different scope than std::pow (or the global ::pow). The standard library pow is still there, unchanged by your definition.
It is just that unqualified name lookup will only find the functions with the name declared in the inner-most scope where a declaration for the name is found.
If that is not what you want, you need to qualify the name to let the compiler know which pow exactly you want to call, e.g.
return std::pow(entier, puissance);
to call the pow function in the standard library namespace std or
return ::pow(entier, puissance);
to call the pow function in the global namespace scope. However, including <cmath> does not guarantee that the standard library pow function will be declared in the global namespace scope, which is why you should use std::pow (instead of ::pow or just pow) in any case anyway.
|
73,724,841
| 73,724,861
|
Why does this variable have different values in different source files?
|
I have the following code:
main.cpp
#include "Test.h"
int main() {
Create();
}
Test.h
#pragma once
#include <iostream>
#include "Function.h"
class Test {
public:
Test();
};
extern Test* g_pTest;
inline void Create() {
g_pTest = new Test;
std::cout << "On Test.h: " << std::endl;
PrintAddr();
}
Test.cpp
#include "Test.h"
Test* g_pTest = nullptr;
Test::Test() {
std::cout << "On Test.cpp:" << std::endl;
PrintAddr();
}
Function.h
#pragma once
void PrintAddr();
Function.cpp
#include "Function.h"
#include "Test.h"
void PrintAddr() {
std::cout << "g_pTest address is " << g_pTest << std::endl;
}
When I run it, I get the following output:
On Test.cpp:
g_pTest address is 0000000000000000
On Test.h:
g_pTest address is 000002008A5EAE40
I thought that an extern variable was supposed to have the same value anywhere in the code, so why doesn't it? I've tried to run the same code, but instead of the Test class, I just have a function:
Test.h
#pragma once
#include <iostream>
#include "Function.h"
void Test();
extern int* g_pTest;
inline void Create() {
g_pTest = new int;
Test();
std::cout << "On Test.h: " << std::endl;
PrintAddr();
}
Test.cpp
#include "Test.h"
int* g_pTest = nullptr;
void Test() {
std::cout << "On Test.cpp:" << std::endl;
PrintAddr();
}
And it somehow works if it is done this way, I don't understand the logic...
What am I doing wrong here? What can I do in order to use a class and have access to the same extern variable in any code file?
|
int main() {
Create();
}
main calls Create(), which does this:
g_pTest = new Test;
Test gets newed first. g_pTest gets assigned after Test's constructor finishes and the object is constructed.
Test's constructor calls PrintAddr, which prints the value of g_pTest while it is still nullptr:
Test* g_pTest = nullptr;
This remains the case until after Test finishes constructing.
Only after the constructor wraps up its business does the pointer to the new object get assigned to g_pTest, and then the second call to PrintAddr (from Create) prints the value of the pointer, which now points to the new object.
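In other words, the single statement behaves roughly like this two-step sketch:
Test* tmp = new Test;   // the constructor (and its PrintAddr call) runs here; g_pTest is still nullptr
g_pTest = tmp;          // only now does the global pointer receive the new object's address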
|
73,724,866
| 73,724,948
|
Exclude Macro, What can I use like inline function in C++
|
I don't want to edit the common part of the source code repeatedly.
So I separate the differing parts into different functions, as below.
/* Origin */
void MyClass::threadFunc_A()
{
// many variables in this function
...
// do something A
...
}
void MyClass::threadFunc_B()
{
// many variables in this function
...
// do something B
...
}
/* I wish */
void MyClass::threadFunc(type)
{
// many variables
int a, b;
char c, d, e;
...
string x, y, z;
...
// case
if (type == A) do_something_A();
if (type == B) do_something_B();
...
if (type == Z) do_something_Z();
}
void do_something_A()
{
// using "many variables (a ~ z)" here
a = 10;
b = 20;
...
}
In the case of function-like macros, I know that the code is expanded in place at compile time, so variables in the same scope can be used.
However, if the do_something() function gets long, there is a limit to what can reasonably be written with a macro.
Is there a way to write an inline function like the 'I wish' version in C++17 or higher?
(Other than making the variables members of the class, or putting them in a structure and passing it)
|
No there is no way to do that. C++ has lexical scoping. What you want would be (at least partially) more like dynamic scoping.
The point of a function is that it separates some part of the logic into a self-contained block of code that can be reused. If you make the name resolution in the function dependent on the declarations at the call site, it becomes impossible to reason about the behavior of the function without also specifying the call site.
Macros effectively do behave that way, but that is not a good thing. It is one of the reasons to avoid them.
C++ does have templates, which allows making similar logic independent of concrete types in a function, but that still doesn't allow making name resolution dependent on the call site.
Write your functions so that they represent a part of the program logic that makes sense in itself. The function should take all variables to which it needs access as arguments, possibly with templated types, and if it needs to work on an unspecified number of arguments, possibly of different types, it can be a variadic function template. If there are many variables with similar meaning, consider putting them in an array or container or class combining them into one unit that makes sense in the program logic.
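As a sketch of that last suggestion, applied to the question's pseudocode (the type and member names here are made up for illustration):
struct ThreadState {
    int a = 0, b = 0;
    std::string x, y, z;
    // ... the rest of the "many variables"
};
void do_something_A(ThreadState& s) {
    // the helper receives the shared state explicitly instead of seeing the caller's locals
    s.a = 10;
    s.b = 20;
}
void MyClass::threadFunc(Type type) {   // Type stands for the question's unspecified 'type'
    ThreadState s;                      // one object holding what used to be many locals
    if (type == A) do_something_A(s);
}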
|
73,724,906
| 73,750,649
|
Output for specific test case in linked list deletes two numbers randomly
|
I'm working on a linked list program that takes an input for the data of each node like:
Sample Input:
2 18 24 3 5 7 9 6 12
Then it takes each group of even numbers like "2, 18, and 24" and reverses it to be "24, 18, and 2."
It seems to work on a larger scale according to these unit tests, but this particular one outputs:
2 3 5 7 9 12 6
Instead of:
24 18 2 3 5 7 9 12 6
So it just seems to delete the 24 and the 18 and I'm not sure why. Thanks in advance.
Here's my code:
#include <iostream>
using namespace std;
struct node {
int data;
node *next;
node *prev;
node *curr;
};
class linked_list {
private:
node *head,*tail;
public:
linked_list() {
head = NULL;
tail = NULL;
}
void add_node(int n) {
node *tmp = new node;
tmp->data = n;
tmp->next = NULL;
if(head == NULL) {
head = tmp;
tail = tmp;
}
else {
tail->next = tmp;
tmp->prev = tail;
tail = tail->next;
}
}
node* getHead() {
return head;
}
void print_List() {
node *tmp;
tmp = head;
while (tmp != NULL) {
cout << tmp->data << " ";
tmp = tmp->next;
}
}
node* reverse_Groups(node* head, node* prev) {
if (head == NULL) {
return NULL;
}
node *tmp;
node *curr;
curr = head;
while (curr != NULL && curr->data % 2 == 0) {
tmp = curr-> next;
curr->next = prev;
prev = curr;
curr = tmp;
}
if (curr != head) {
head->next = curr;
curr = reverse_Groups(curr, NULL);
return prev;
}
else {
head->next = reverse_Groups(head->next, head);
return head;
}
}
};
int main() {
linked_list a;
int numNodes, i, tempNode;
cin >> numNodes;
for (i = 0; i < numNodes; ++i) {
cin >> tempNode;
a.add_node(tempNode);
}
//a.print_List();
a.reverse_Groups(a.getHead(), NULL);
a.print_List();
return 0;
}
|
So I figured out it was a minor oversight on my end.
reverse_Groups returns the new head when it's done, but I was printing the list starting from the stale head rather than the returned one, so it wasn't printing the first two numbers. Once I figured this out, I added a getHead function that returns the correct head after the reverse function finishes.
I just changed the print function to take a head parameter and supplied the correct head using the getHead function, so that it prints from the right starting point.
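In code, the fix described above looks roughly like this (a sketch; the print_List overload taking a start node is the change mentioned in the last paragraph):
node* newHead = a.reverse_Groups(a.getHead(), NULL); // capture the head returned by the reversal
a.print_List(newHead);                               // print from the reversed head, not the stale one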
|
73,725,046
| 73,725,196
|
When does the conversion happen when passing arguments to thread function?
|
When reading a book about c++ multi-thread programming, I came across one example below.
void f(int i,std::string const& s);
void oops(int some_param)
{
char buffer[1024];
sprintf(buffer, "%i",some_param);
std::thread t(f,3,buffer);
t.detach();
}
In this case, it’s the pointer to the local variable buffer that’s passed through to the new thread, and there’s a significant chance that the function oops will exit before the buffer has been converted to a std::string on the new thread, thus leading to undefined behavior.
I know that it is the char* argument buffer, not a std::string, that is copied into the internal storage of thread t. What confuses me is that there's a chance the conversion from char* to std::string has not taken place even when thread t is already constructed. So when will the conversion take place? Is it just before the thread is scheduled to execute by the OS?
|
As you are saying the array argument is decayed to a char*, which is then copied for the thread. To be more specific the newly created thread executes basically the following expression:
std::invoke(auto(std::forward<F>(f)), auto(std::forward<Args>(args))...)
where f and args are the parameters of the std::thread constructor and auto(/*...*/) is the new C++23 syntax which creates a prvalue of the decayed type of the argument from the argument (and materializes a temporary object from it when being bound to the reference in the parameter of std::invoke).
However, in this evaluation the materialization of the temporary objects auto(/*...*/) is performed in the context of the caller of the thread constructor and the beginning of the call to std::invoke synchronizes with the completion of the std::thread constructor.
That means the auto(/*...*/) decayed argument copies are made before the thread starts execution, but everything else happens as part of the new thread of execution.
So auto(buffer) will give a temporary object of type char* which lives until the end of the thread function invocation (because temporary objects live until the end of the full expression in which they are created). Then std::invoke is called with a reference to this temporary object as argument. This invocation happens in the newly created thread of execution and everything from here happens unsynchronized with the rest of the execution in the original thread.
std::invoke basically calls the copied f given to it as first argument and forwards the other arguments to f. Now since your f expects a std::string const& as second parameter, but std::invoke will pass it a char* as argument, a temporary std::string will be materialized, initialized from the char*, for the reference to bind to. This construction happens therefore in the context of the std::invoke call, which is already unsynchronized with the potential destruction, in the main thread, of the array to which the pointer points.
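For completeness, the usual fix (it follows from the explanation above, but is not part of the quoted answer) is to do the conversion before the std::thread constructor returns, so the copy of the string happens while buffer is still alive:
std::thread t(f, 3, std::string(buffer)); // construct the std::string in oops(); the thread then copies/moves it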
|
73,725,254
| 73,725,415
|
C++ Recursion and Exception Handling with Fibonacci Sequence
|
This program seems to work for the Fibonacci Sequence using recursion and exception handling. (Yes I want to do it with recursion, I know I can use loops).
It is supposed to throw an error if the next result is out of range for long long. This works for most inputs, but if I put in the number 91, it shows one negative result without printing the error message. If I put in 89, 90, 92, 93, ... it works fine.
Why 91?
Output of 91:
0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 -6246583658587674878
CPP Program:
#include <iostream>
using std::cout;
using std::cin;
using std::endl;
using std::cerr;
#include <stdexcept>
using std::out_of_range;
#include <climits>
class OUTofRage : public out_of_range
{
public:
OUTofRage()
: out_of_range("Out Of Range\n") {}
} ;
long long fibonacci(long long target, long long numberOne, long long numberTwo);
int main() {
long long fiboSub;
cout << "--fibonacci Sequencer--\n" << endl;
cout << "Which place in the fibonacci sequence do you want to reach (F(n))?\n";
cout << "n> ";
cin >> fiboSub;
cout << endl << fibonacci(fiboSub, 0, 1) << endl << endl;
return 0;
}
long long fibonacci(long long target, long long numberOne, long long numberTwo) {
cout << numberOne << " ";
if(target < 0) {
throw OUTofRage();
} else if(target == 0) {
return numberOne + numberTwo;
} else {
try {
if((numberOne + numberTwo) < 0) {
throw OUTofRage();
} else {
fibonacci(target-1, numberTwo, numberOne + numberTwo);
}
}
catch (const out_of_range& O_O_R) {
cerr << endl << "\nError: " << O_O_R.what() << '\n';
exit (EXIT_FAILURE);
}
}
}
|
This line of code is invalid:
if((numberOne + numberTwo) < 0)
per Is signed integer overflow still undefined behavior in C++? you are relying on Undefined Behaviour, which is a mistake. You can replace your condition with:
if( std::numeric_limits<long long>::max() - numberOne < numberTwo )
or more generic:
if( std::numeric_limits<decltype(numberOne)>::max() - numberOne < numberTwo )
which does not overflow and will give you a predictable result. For this to work, you also cannot keep this condition as it is, without checking for overflow first:
if(target == 0) {
return numberOne + numberTwo;
as it relies on the same problem. So here is how your function could look:
#include <limits> // for std::numeric_limits used below
long long fibonacci(long long target, long long numberOne, long long numberTwo)
{
// check for overflow first
if(std::numeric_limits<decltype(numberOne)>::max() - numberOne < numberTwo)
throw OUTofRage();
if(target == 0)
return numberOne + numberTwo;
return fibonacci(target-1, numberTwo, numberOne + numberTwo);
}
Live example
PS on this line:
fibonacci(target-1, numberTwo, numberOne + numberTwo);
you are missing a return statement
|
73,725,569
| 73,725,605
|
Getting unexpected output when writing a program to find the first perfect square with two odd ending digits
|
I'm a beginner taking a C++ class. Thanks for your help. So I have to write a program that finds the first perfect square which ALSO has two odd digits, and it doesn't produce the expected output.
#include <iostream>
#include <cmath>
using namespace std;
int main()
{
for (int i = 1; i < 1000; i++)
{
// 1. Iterate from 1 to 100 and check which number is a perfect square (root is integer)
int sqrRoot = sqrt(i);
if (sqrRoot * sqrRoot == i)
// 2. We need to find the FIRST perfect square who last digit is odd
{
int lastDigit = i % 10;
int secondToLastDigit = i % 100;
if(lastDigit % 2 != 0 && secondToLastDigit % 2 != 0)
{
cout << "Found it. The first perfect square with two odd ending digits is " << i << "\n.";
}
}
// we printed out every perfect square...
}
}
First, I created a for loop to check from 1 to 1000 to find the first perfect squares in the range. Then in the for loop I tested for it being a perfect square or not in an if statement. Then I created two temporary variables in the for loop, lastDigit and secondtoLastDigit which 'grabs' the last most and second to last most digit using the mod 10 'trick.' Then I check if those two digits are odd or not, and if they are, then print out the perfect square that has ending two odd digits.
Then, when I run it, I noticed I got a bunch of outputs:
shahjacob@lenovoLegion7:~/cs211$ ./a.out
Found it. The first perfect square with two odd ending digits is 25
.Found it. The first perfect square with two odd ending digits is 49
.Found it. The first perfect square with two odd ending digits is 81
.Found it. The first perfect square with two odd ending digits is 121
.Found it. The first perfect square with two odd ending digits is 169
.Found it. The first perfect square with two odd ending digits is 225
.Found it. The first perfect square with two odd ending digits is 289
.Found it. The first perfect square with two odd ending digits is 361
.Found it. The first perfect square with two odd ending digits is 441
.Found it. The first perfect square with two odd ending digits is 529
.Found it. The first perfect square with two odd ending digits is 625
.Found it. The first perfect square with two odd ending digits is 729
.Found it. The first perfect square with two odd ending digits is 841
.Found it. The first perfect square with two odd ending digits is 961
But I was confused first, since some of these outputs are unexpected. First, 121 literally has an even second to last digit. So does 225. Second, why does it not just print the first one?
Please help
|
int secondToLastDigit = i % 100; gives the last two digits, not the second-to-last digit. Thus lastDigit % 2 != 0 and secondToLastDigit % 2 != 0 are equivalent tests of whether the number itself is odd.
Example with 121: lastDigit is 1, secondToLastDigit is 21, and 21 % 2 == 1 % 2 == 1.
You might want int secondToLastDigit = (i / 10) % 10;. Also, the loop keeps running after a match; if you only want the first such square, add a break (or return) after printing it.
|
73,725,881
| 73,726,346
|
How can we decide that an integer is power of 2 by using bit manipulation?
|
For example if the given number n = 16
If it is power of 2 then the output should be true else false, i want the answer by using bit manipulation.
|
The solution mentioned by @TimRoberts is the simplest way of using bit manipulation.
Just check whether ((n & (n - 1)) == 0). Why does this work?
Assume n = 8 (1000 in base 2), then n - 1 = 7 (0111 in base 2). If you do a bitwise AND of these two, you get 0. This is true for n equal to any power of 2.
So your function can simply be:
bool is_power_of_2(unsigned long n)
{
return n != 0 && (n & (n - 1)) == 0;
}
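For completeness, a minimal usage sketch (the helper is repeated so the snippet compiles on its own; the test values are arbitrary):
#include <cassert>
bool is_power_of_2(unsigned long n)
{
    return n != 0 && (n & (n - 1)) == 0;
}
int main()
{
    assert(is_power_of_2(1));
    assert(is_power_of_2(16));
    assert(is_power_of_2(1024));
    assert(!is_power_of_2(0));   // without the n != 0 guard, 0 would wrongly pass the test
    assert(!is_power_of_2(12));
}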
|
73,726,579
| 73,726,660
|
c++ how to use while properly
|
I have been learning C++ for around two weeks and therefore have a lot of questions. It feels like learning a new sport: in my head I am already moving better than any Olympic player, but the actual movement is still poor.
What I want to know is whether I can use a while loop together with cout.
int main() {
struct {
string engineType;
string brand;
int price;
int range;
} candidate1, candidate2;
// information of candidate 1
candidate1.name = "IONIQ5";
candidate1.range = 450;
candidate1.price = 35000;
// information of candidate 2
candidate2.brand = "Tesla_Model_3";
candidate2.range = 650;
candidate2.price = 55000;
// show each price with while function
int i = 1;
while (i<3) {
cout << "Price :" << candidate[i].range << endl ;
i++;
}
return 0;
I want to have as a result of print
450
650
what do i have to do to get it ?
Thanks for the help !
|
You can use an array with elements of type Candidate and then loop through the array and print the values as shown below:
//class representing a Candidate info
struct Candidate{
string engineType;
string brand;
int price;
int range;
};
int main() {
//create an array with elements of type Candidate (price before range, matching the struct order)
Candidate arr[] = {{"IONIQ5", "Honda", 35000, 450}, {"Tesla_Model_3", "Tesla", 55000, 650}};
//iterate through the array using range based for loop
for(const Candidate& candidate: arr)
{
std::cout<<candidate.range<<std::endl;
}
}
Working demo
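If you specifically want to keep a while loop, as the question asked, the same idea works with an index-based while over the array. A minimal, self-contained sketch:
#include <cstddef>
#include <iostream>
#include <iterator>
#include <string>
struct Candidate {
    std::string engineType;
    std::string brand;
    int price;
    int range;
};
int main() {
    Candidate arr[] = {{"IONIQ5", "Honda", 35000, 450}, {"Tesla_Model_3", "Tesla", 55000, 650}};
    std::size_t i = 0;
    while (i < std::size(arr)) {   // loop condition derived from the array size instead of a hard-coded bound
        std::cout << arr[i].range << std::endl;
        ++i;
    }
}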
|
73,726,748
| 73,728,927
|
IDispatch null pointer exception while creating Active Directory user
|
I am writing a C++ native method to create an Active Directory user. I am getting a null pointer exception . This code is exactly the same as the code in the official Microsoft documentation.
I have mentioned in a comment on which line I get the error:
HRESULT CreateUserFromADs(
LPCWSTR pwszName,
LPCWSTR pwszSAMAccountName,
LPCWSTR pwszInitialPassword)
{
HRESULT hr;
CoInitialize(NULL);
IADsContainer* pUsers = NULL;
hr = ADsOpenObject(L"LDAP://WIN-F94H2MP3UJR.Test.local/CN=Users,DC=Test,DC=local", L"Administrator", L"Pass@12",
ADS_SECURE_AUTHENTICATION, // For secure authentication
IID_IADs,
(void**)&pUsers);
if (SUCCEEDED(hr))
{
IDispatch* pDisp = NULL;
CComBSTR sbstrName = "CN=";
sbstrName += pwszName;
// Create the new object in the User folder.
hr = pUsers->Create(CComBSTR("user"), sbstrName, &pDisp);
if (SUCCEEDED(hr))
{
IADsUser* padsUser = NULL;
// Get the IADs interface.
// Am getting null pointer exception here.
hr = pDisp->QueryInterface(IID_IADsUser,
(void**)&padsUser);
if (SUCCEEDED(hr))
{
CComBSTR sbstrProp;
/*
The sAMAccountName property is required on operating system
versions prior to Windows Server 2003.
The Windows Server 2003 operating system will create a
sAMAccountName value if one is not specified.
*/
CComVariant svar;
svar = pwszSAMAccountName;
sbstrProp = "sAMAccountName";
hr = padsUser->Put(sbstrProp, svar);
/*
Commit the new user to persistent memory.
The user does not exist until this is called.
*/
hr = padsUser->SetInfo();
/*
Set the initial password. This must be done after
SetInfo is called because the user object must
already exist on the server.
*/
hr = padsUser->SetPassword(CComBSTR(pwszInitialPassword));
/*
Set the pwdLastSet property to zero, which forces the
user to change the password the next time they log on.
*/
sbstrProp = "pwdLastSet";
svar = 0;
hr = padsUser->Put(sbstrProp, svar);
/*
Enable the user account by removing the
ADS_UF_ACCOUNTDISABLE flag from the userAccountControl
property. Also, remove the ADS_UF_PASSWD_NOTREQD and
ADS_UF_DONT_EXPIRE_PASSWD flags from the
userAccountControl property.
*/
svar.Clear();
sbstrProp = "userAccountControl";
hr = padsUser->Get(sbstrProp, &svar);
if (SUCCEEDED(hr))
{
svar = svar.lVal & ~(ADS_UF_ACCOUNTDISABLE |
ADS_UF_PASSWD_NOTREQD |
ADS_UF_DONT_EXPIRE_PASSWD);
hr = padsUser->Put(sbstrProp, svar);
hr = padsUser->SetInfo();
}
hr = padsUser->put_AccountDisabled(VARIANT_FALSE);
hr = padsUser->SetInfo();
padsUser->Release();
}
pDisp->Release();
}
pUsers->Release();
}
CoUninitialize();
return hr;
}
Error message is
Exception thrown: read access violation.
pDisp was nullptr.
|
You are asking ADsOpenObject() for an IADs* interface pointer, but you are storing it in an IADsContainer* variable. That is a type mismatch, IADs and IADsContainer are unrelated interfaces. So, when you call pUsers->Create(), you are not actually calling IADsContainer::Create() at all, you are actually calling IADs::SetInfo() instead, which does not assign anything to pDisp, which is why it remains NULL.
The Microsoft doc you linked to does not make that mistake, it asks ADsOpenObject() for IADsContainer instead of IADs.
The IID you request must match the variable type you receive into. So, change IID_IADs to IID_IADsContainer:
IADsContainer* pUsers = NULL;
hr = ADsOpenObject(...,
IID_IADsContainer,
(void**)&pUsers);
You should consider using the IID_PPV_ARGS() macro to avoid making this mistake again:
IADsContainer* pUsers = NULL;
hr = ADsOpenObject(...,
IID_PPV_ARGS(&pUsers));
On a side note, your CreateUserFromADs() function has no business calling CoInitialize() directly, so you should remove that call. It is the responsibility of the calling thread to call CoInitialize/Ex() to establish its concurrency model before performing any COM-related activities. It is not up to functions to decide what concurrency model to use on behalf of the calling thread.
|
73,726,828
| 73,731,890
|
Use button from a qml to control(interact) the size of Rectangle which is in another qml file [QML] [JS]
|
I recently start to learn Qt and QML for prototyping some UI and experienced some issue.
Basically, I have a Rectangle (id: myItem) in TestB.qml which is considered as a button. I also have another Rectangle (id:changedrect) in TestA.qml.
The functionalities I want to implement is when myItem is clicked, the width and height of changedrect in TestA.qml will change.
Here are the hierarchy of directories
My TestA.qml
import QtQuick 2.15
import QtQuick.Window 2.15
import Qt5Compat.GraphicalEffects
import QtQuick.Shapes 2.15
import QtQml 2.15
import QtQuick.Controls 2.15
import QtQuick.Layouts 2.15
import QtMultimedia
//import "../TestB" as TestB
import "resizeRect.js" as ResizeRectScript
Item {
id: testA
property alias changeRect: changedrect
/** expose the boolean*/
property bool ifSlected: false
Rectangle{
anchors.fill: parent
id: changedrect
x: 10
y: 10
width:200
height: 200
color:"red"
Component.onCompleted:{
ResizeRectScript.resize(testA.ifSlected,changedrect); // pass boolean flag && id of rect
}
}
// Loader {
// id: myLoader
// source: "../TestB/TestB.qml"
// }
// Connections {
// target: myLoader.item
// function onMessage(msg) { console.log(msg) }
// }
}
resizeRect.js
function resize(selectFlag, rectId){
if(selectFlag = true){
rectId.width = 100;
console.log("yes, resize is excuted");
}
}
TestB.qml
import QtQuick 2.15
import QtQuick.Window 2.15
import Qt5Compat.GraphicalEffects
import QtQuick.Shapes 2.15
import QtQml 2.15
import QtQuick.Controls 2.15
import QtQuick.Layouts 2.15
import QtMultimedia
import "../TestA" as TestA
Item{
TestA.TestA{
id: testA
}
Rectangle {
id: myItem
signal message()
width: 100; height: 100
color: "pink"
MouseArea {
anchors.fill: parent
onClicked: {
testA.ifSlected = true;
}
}
}
}
And I instantiated(created object) TestA and TestB on test2.qml
test2.qml
import QtQuick 2.15
import QtQuick.Window 2.15
import Qt5Compat.GraphicalEffects
import QtQuick.Shapes 2.15
import QtQml 2.15
import QtQuick.Controls 2.15
import QtQuick.Layouts 2.15
import QtMultimedia
import "TestA" as TestA
import "TestB" as TestB
Window {
width: Screen.width
height:Screen.height
flags: Qt.FramelessWindowHint | Qt.BypassWindowManagerHint |Qt.WindowStaysOnBottomHint |
Qt.NoDropShadowWindowHint /** hide the native menu bar so that the windows takes over whole screen */
visibility: "FullScreen"
visible: true
color: "black"
TestA.TestA{
id:test_A
}
TestB.TestB{
id: test_B
}
}
The problem is
the console message is printed, which means the resize() function from the js file is executed (see below); however, the size of the rectangle in TestA.qml is not updated. I tried to use Signal from the Qt documentation, but the examples did not work either.
Here is the terminal output:
Can someone helps me with this? Thank you so much!!
|
The only time that I see your resize function called is when you construct a TestA object. You're getting two printouts of "yes, resize is excuted" because you've created two instances of TestA. From the code you've shown, it will not execute that code when you click on TestB. There's plenty of ways to fix this. My recommendation is to remove references to TestA from inside TestB and handle all of their interactions within test2.qml. Also, you should automatically call resize whenever your ifSlected property changes.
TestA.qml:
Item {
id: testA
property bool ifSlected: false
/* Automatically call resize when boolean value changes */
onIfSlectedChanged: {
ResizeRectScript.resize(testA.ifSlected, changedrect);
}
Rectangle {
id: changedrect
// ...
}
}
TestB.qml:
Item {
id: testB
/* Don't create another instance of TestA here! */
// TestA.TestA{
// id: testA
// }
/* Create a signal to send up to the parent when clicked */
signal clicked()
Rectangle {
id: myItem
// ...
MouseArea {
anchors.fill: parent
onClicked: {
/* We don't know anything about testA from inside testB */
// testA.ifSlected = true;
/* Just emit the signal */
testB.clicked();
}
}
}
}
test2.qml:
Window {
// ...
/* Now this is the only instance of TestA */
TestA.TestA {
id: test_A
}
TestB.TestB{
id: test_B
onClicked: {
test_A.ifSlected = true;
}
}
}
|
73,727,407
| 73,921,271
|
Make 2D Sprite Face Camera Using Vertex Shader - DirectX 9
|
Currently, I'm calculating the world matrix in C++ and then pass it to the shader to make the sprite always face the camera:
static D3DXVECTOR3 up(0, 0, 1);
D3DXMATRIX world, view, proj;
// get the world, view and projection matrix
g_pd3dDevice->GetTransform(D3DTS_WORLD, &world);
g_pd3dDevice->GetTransform(D3DTS_VIEW, &view);
g_pd3dDevice->GetTransform(D3DTS_PROJECTION, &proj);
D3DXMATRIX translation, invView, cameraPosition, rotation,invRotation;
// get the camera position by inversing the view matrix
D3DXMatrixInverse(&invView, NULL, &view);
cameraPosition = D3DXVECTOR3(invView._41, invView._42, invView._43);
// translate the sprite position to a world matrix
D3DXMatrixTranslation(&translation, spritePosition.x, spritePosition.y, spritePosition.z);
// calculate the world matrix rotation to look from
// the sprite position to the camera position
D3DXMatrixLookAtRH(&invRotation, &spritePosition, &cameraPosition, &up);
D3DXMatrixInverse(&rotation, NULL, &invRotation);
// pass the world * view * projection to the shader
world = rotation * translation;
worldViewProj = matrix.rotation * matrix.view * matrix.proj;
g_pEffect->SetMatrix("WorldViewProj", &worldViewProj);
I've just been learning DirectX and HLSL for the past few days so I don't know if this is the optimal and correct way to do it.
I thought it would have been better done in the vertex shader but I don't know how, please guide me.
|
SimpleMath in the DirectX Tool Kit includes Matrix::CreateBillboard and Matrix::CreateConstrainedBillboard which is specifically designed for creating this kind of transformation matrix.
inline Matrix Matrix::CreateBillboard(
const Vector3& object,
const Vector3& cameraPosition,
const Vector3& cameraUp,
const Vector3* cameraForward) noexcept
{
using namespace DirectX;
const XMVECTOR O = XMLoadFloat3(&object);
const XMVECTOR C = XMLoadFloat3(&cameraPosition);
XMVECTOR Z = XMVectorSubtract(O, C);
const XMVECTOR N = XMVector3LengthSq(Z);
if (XMVector3Less(N, g_XMEpsilon))
{
if (cameraForward)
{
const XMVECTOR F = XMLoadFloat3(cameraForward);
Z = XMVectorNegate(F);
}
else
Z = g_XMNegIdentityR2;
}
else
{
Z = XMVector3Normalize(Z);
}
const XMVECTOR up = XMLoadFloat3(&cameraUp);
XMVECTOR X = XMVector3Cross(up, Z);
X = XMVector3Normalize(X);
const XMVECTOR Y = XMVector3Cross(Z, X);
XMMATRIX M;
M.r[0] = X;
M.r[1] = Y;
M.r[2] = Z;
M.r[3] = XMVectorSetW(O, 1.f);
Matrix R;
XMStoreFloat4x4(&R, M);
return R;
}
This is a port of the XNA Game Studio C# math library to C++ using DirectXMath.
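For reference, a minimal sketch of how the helper might be used through SimpleMath (the variable names are placeholders; the resulting world matrix would still be combined with view and projection and passed to the shader as in the question):
#include "SimpleMath.h"
using namespace DirectX::SimpleMath;
// spritePos / cameraPos / cameraUp stand in for your scene data
Matrix MakeSpriteWorld(const Vector3& spritePos, const Vector3& cameraPos, const Vector3& cameraUp)
{
    // builds the rotation + translation that keeps the sprite facing the camera
    return Matrix::CreateBillboard(spritePos, cameraPos, cameraUp, nullptr);
}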
|
73,727,501
| 73,727,628
|
Run C++ code in mac without Xcode, and use custom header bits/stdc++.h
|
Trying to run C++ on Vscode on a Mac, but the stdc++.h library is not found.
I want to setup bits/stdc++.h instead of the custom header of clang++.
fatal error: 'bits/stdc++.h' file not found.
It will help if someone give me the c_cpp_properties.json file and settings
|
stdc++.h setup on mac (without xcode)
Assuming that you've installed the homebrew and C/C++ compiler extension.
Then follow these steps. bits/stdc++.h is a GNU GCC (libstdc++) extension,
whereas macOS uses the Clang compiler by default.
brew install gcc
gcc --version
go to the /Library/Developer/CommandLineTools/usr/include directory
(go to finder, type command+shift+g, then paste the directory name)
create a folder named bits inside this directory and then copy the stdc++.h file from this
github link (https://github.com/gcc-mirror/gcc/blob/master/libstdc%2B%2B-v3/include/precompiled/stdc%2B%2B.h).
create a file named stdc++.h inside the bits folder, paste the code copied from the GitHub link into it,
then save it.
After restarting VS Code, some users initially get an error message saying
that some library used by stdc++.h is deprecated.
To dismiss that error, add the GCC library's path to the includePath of the c_cpp_properties.json file.
("/usr/local/Cellar/gcc/12.2.0/include/c++/12",
"/usr/local/Cellar/gcc/12.2.0/include/c++/12/x86_64-apple-darwin21")
You can also get this path by hovering over the #include line in your code.
Add these two paths to the includePath section of your c_cpp_properties.json file.
Make sure your compilerPath is "/usr/bin/clang".
Here is an initial c_cpp_properties.json file for reference:
"configurations": [
{
"name": "Mac",
"includePath": [
"${workspaceFolder}/**",
"/usr/local/Cellar/gcc/12.2.0/include/c++/12",
"/usr/local/Cellar/gcc/12.2.0/include/c++/12/x86_64-apple-darwin21"
],
"defines": [],
"macFrameworkPath": [
"/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/System/Library/Frameworks"
],
"compilerPath": "/usr/bin/clang",
"cStandard": "c17",
"cppStandard": "c++17",
"intelliSenseMode": "macos-clang-x64"
}
    ],
    "version": 4
}
|
73,727,951
| 73,839,488
|
Create boost::spsc queue in boost managed shared memory with a runtime size
|
Shared-memory IPC synchronization (lock-free)
My use case aligns very closely with what has been described in the above question. But I wanted to go a step further in creating the spsc queue dynamically with a user defined runtime size. I tried implementing it with the following code:
void create_shared_spsc_queue(size_t sz)
{
using char_alloc = boost::interprocess::allocator<char, boost::interprocess::managed_shared_memory::segment_manager>;
using shared_string = boost::interprocess::basic_string<char, std::char_traits<char>, char_alloc>;
using string_alloc = boost::interprocess::allocator<shared_string, boost::interprocess::managed_shared_memory::segment_manager>;
using ring_buffer_dynamic = boost::lockfree::spsc_queue<shared_string, boost::lockfree::allocator<string_alloc> >;
ring_buffer_dynamic *queue_dynamic_;
boost::interprocess::managed_shared_memory segment_(boost::interprocess::open_only, "MySharedMemroy");
string_alloc string_alloc_(segment_.get_segment_manager());
queue_dynamic_ = segment_.construct<ring_buffer_dynamic>(rbuff_name)(string_alloc_, sz);
}
But this throws compilation error:
/usr/include/boost/interprocess/detail/named_proxy.hpp:85:7: error: no matching function for call to ‘boost::lockfree::spsc_queue<boost::container::basic_string<char, std::char_traits<char>, boost::interprocess::allocator<char,
boost::interprocess::segment_manager<char, boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>, boost::interprocess::iset_index> > >,
boost::lockfree::allocator<boost::interprocess::allocator<boost::container::basic_string<char, std::char_traits<char>, boost::interprocess::allocator<char, boost::interprocess::segment_manager<char, boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family, boost::interprocess::offset_ptr<void, long int, long unsigned int, 0>, 0>,
boost::interprocess::iset_index> > >, boost::interprocess::segment_manager<char,
boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family, boost::interprocess::offset_ptr<void, long int, long unsigned int, 0>, 0>, boost::interprocess::iset_index> > > >::spsc_queue(boost::interprocess::allocator<boost::container::basic_string<char, std::char_traits<char>, boost::interprocess::allocator<char,
boost::interprocess::segment_manager<char, boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>, boost::interprocess::iset_index> > >, boost::interprocess::segment_manager<char, boost::interprocess::rbtree_best_fit<boost::interprocess::mutex_family>, boost::interprocess::iset_index> >&, int)’
85 | { ::new((void*)mem, boost_container_new_t())T(boost::forward<Args>(get<IdxPack>(args_))...); }
I can understand it's related to an issue with the allocators, but I can't seem to resolve it
with my limited understanding of allocators. How can I implement this?
|
For posterity: I figured it out. I was calling the ctor of spsc_queue with its arguments in the wrong order. The following works:
queue_dynamic_ = segment_.construct<ring_buffer_dynamic>(rbuff_name)(sz, string_alloc_);
Source: https://www.boost.org/doc/libs/1_80_0/doc/html/boost/lockfree/spsc_queue.html
|
73,728,232
| 73,728,413
|
Converting Integer Types
|
How does one convert from one integer type to another safely and without setting off alarm bells in compilers and static analysis tools?
Different compilers will warn for something like:
int i = get_int();
size_t s = i;
for loss of signedness or
size_t s = get_size();
int i = s;
for narrowing.
Casting can remove the warnings but doesn't solve the safety issue.
Is there a proper way of doing this?
|
You can try boost::numeric_cast<>.
boost numeric_cast returns the result of converting a value of type Source to a value of type Target. If out-of-range is detected, an exception is thrown (see bad_numeric_cast, negative_overflow and positive_overflow ).
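For example, a minimal sketch of how it behaves (the values are illustrative):
#include <boost/numeric/conversion/cast.hpp>
#include <cstddef>
#include <iostream>
int main()
{
    try {
        std::size_t s = 123;
        int i = boost::numeric_cast<int>(s);        // fine, the value fits
        std::cout << i << '\n';
        std::size_t huge = static_cast<std::size_t>(-1);
        int j = boost::numeric_cast<int>(huge);     // throws positive_overflow
        std::cout << j << '\n';
    }
    catch (const boost::numeric::bad_numeric_cast& e) {
        std::cerr << e.what() << '\n';
    }
}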
|
73,729,045
| 73,729,073
|
Declaring a template class as friend
|
Here is an MCVE:
template <typename T>
class A {
public:
friend class B;
};
template <typename T>
class B {};
int main() {
A<int> a;
B<int> b;
return 0;
}
Very simple thing and I don't know why this is giving compiler errors. I am new to using templates. I also tried changing the friend declaration to friend class B<T>; but that gave other errors and did not work either.
Here is the errors that the compiler is throwing for the above code:
1> Error C2989 'B': class template has already been declared as a non-class template D:\Projects\1.C++\test\test\test.cpp 8
2> Error C3857 'B': multiple template parameter lists are not allowed test D:\Projects\1.C++\test\test\test.cpp 7
3> Error C2059 syntax error: '<' test D:\Projects\1.C++\test\test\test.cpp 12
|
It depends on what you want. If you want to make B<T> a friend of A<T>, then friend class B<T>; was right, but it needs a prior declaration of B:
template <typename T> class B;
template <typename T>
class A {
public:
friend class B<T>;
};
template <typename T>
class B {};
int main() {
A<int> a;
B<int> b;
return 0;
}
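If instead you want every specialization of B to be a friend of every A<T> (a different design choice), a friend template declaration can be used, for example:
template <typename T>
class A {
public:
    template <typename U>
    friend class B;   // every B<U> is a friend of every A<T>
};
template <typename T>
class B {};
int main() {
    A<int> a;
    B<int> b;
    return 0;
}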
|
73,729,290
| 73,729,404
|
CMake only build lib when compiler supports C++20 or higher
|
in our project we are using the highest available C and CXX standard by setting
set(CMAKE_C_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
However the project is also build with some old compilers that do not support C++20.
Some libs on the other hand require C++20. How can i configure my project so that these libs are only build if the compiler supports C++20?
(I know i could use #ifdef __cplusplus >= ... but I'm trying to avoid #ifdefs ;))
Thx for your help
:)
|
Well, the variable name speaks for itself. https://cmake.org/cmake/help/latest/variable/CMAKE_CXX_STANDARD_REQUIRED.html
set(CMAKE_CXX_STANDARD_REQUIRED YES)
You should prefer set_target_properties.
My expectation is that old compilers automatically skip building the c++20 lib
So only add the target when the compiler does support C++20.
if("cxx_std_20" IN_LIST CMAKE_CXX_COMPILE_FEATURES)
add_library(the_lib_to_skip ...)
endif()
|
73,729,659
| 73,729,787
|
std::conditional_t, How to conditionally define the type of variable when both branches do not compile at the same time?
|
I have a templated function which treats 2 types of classes (with old or new format).
I want to define a variable that will have its type defined at compile time like:
template <typename T>
using MyType = std::conditional_t<isNewFormatCondition<T>, typename T::subClass::Format, typename T::Format::reference>
template <typename T>
extract(T& t){
MyType<T> var{t.getFormat()};
}
I mean, for a T which is one of the new classes, var will have type T::subClass::Format, and for the old classes it will be a T::Format&.
More context:
Both types of classes support getFormat().
Naturally this will not compile as old classes don't have
'subClass::Format' in them, and vice versa
This question answers the case when both branches of std::conditional compile: How to conditionally declare a local variable based on a template argument?
|
std::conditional_t is not SFINAE: all template arguments (both branches) must be valid. You can either use SFINAE or simple specialization:
#include <type_traits>
#include <iostream>
template <typename T,bool>
struct MyType;
template <typename T>
struct MyType<T,false> {
using type = int;
};
template <typename T>
struct MyType<T,true> {
using type = double;
};
template <typename T,bool b>
using MyType_t = typename MyType<T,b>::type;
int main() {
std::cout << std::is_same_v< MyType<void,true>::type, double> << "\n";
std::cout << std::is_same_v< MyType<void,false>::type, int> << "\n";
}
|
73,730,002
| 73,732,987
|
Generalizing std::conditional_t<>
|
I have a function that computes a certain object from a given parameter (say, an important node from a graph). Now, when calculating such an object, the function might allocate some memory. Sometimes I want the function to just return the result, and sometimes to return the result plus the memory used to compute it.
I typically solve this binary case like this:
enum class what {
what1, // return, e.g., just an int
what2 // return, e.g., a std::pair<int, std::vector<int>>
};
template <what w>
std::conditional_t<w == what::what1, int, std::pair<int, std::vector<int>>>
calculate_something(const param& p) { ... }
I would like to generalize the solution above to longer enumerations
enum class list_whats {
what1,
what2,
what3,
what4,
what5
};
One possible solution is to nest as many std::conditional_t as needed
template <list_whats what>
std::conditional_t<
what == list_whats::what1,
int,
std::conditional_t<
what == list_whats::what2,
float,
....
>
>
>
calculate_something(const param& p) { ... }
But this is cumbersome and perhaps not too elegant.
Does anyone know how to do this in C++ 17?
EDIT
To make the question perfectly clear: how do I implement the function return_something so as to be able to run the following main?
int main() {
int s1 = return_something<list_whats::what1>();
s1 = 3;
float s2 = return_something<list_whats::what2>();
s2 = 4.0f;
double s3 = return_something<list_whats::what3>();
s3 = 9.0;
std::string s4 = return_something<list_whats::what4>();
s4 = "qwer";
std::vector<int> s5 = return_something<list_whats::what5>();
s5[3] = 25;
}
|
I don't think you should use std::conditional at all to solve your problem. If I get this right, you want to use a template parameter to tell your function what to return. The elegant way to do this could look something like this:
#include <vector>
enum class what { what1, what2 };
template <what W>
auto compute() {
if constexpr (W == what::what1) {
return 100;
}
if constexpr (W == what::what2) {
return std::pair{100, std::vector<int>{}};
}
}
auto main() -> int {
[[maybe_unused]] const auto as_int = compute<what::what1>();
[[maybe_unused]] const auto as_pair = compute<what::what2>();
}
You can also use template specialization if you prefer another syntax:
template <what W>
auto compute();
template <>
auto compute<what::what1>() {
return 100;
}
template <>
auto compute<what::what2>() {
return std::pair{100, std::vector<int>{}};
}
|
73,730,354
| 73,732,944
|
Initialisation in Singleton
|
So, I am creating a small testing library for some simple tasks. I use the self-registration method to define the tests, but I am getting a segfault that I don't understand where is coming from. My project looks like this
Lib
|
|__include
| |__lib.hpp
|__src
| |__lib.cpp
|__examples
| |__example.cpp
|__Makefile
This is my .hpp. Please, ignore the public maps, as I am well aware that they should not be public in the final version.
#include <string>
#include <map>
#include <functional>
#define TEST(name, result_var_name, short_result_var_name, explanation_var) \
bool test_##name(std::string result_var_name); \
static bool test_##name##_registered = TestFactory::getInstance()->Register(#name, &test_##name, #result_var_name, #short_result_var_name); \
static bool test_##name##_description = TestFactory::getInstance()->RegisterExplanation(#result_var_name, explanation_var); \
bool test_##name(std::string result_var_name)
#define RESULT_LONG(result_var_name, result) \
TestFactory::getInstance()->RegisterResult(result_var_name, result);
#define RESULT_SHORT(result_var_name, result) \
TestFactory::getInstance()->RegisterResult(TestFactory::getInstance()->short_to_long_param[result_var_name], result);
class TestFactory {
private:
TestFactory();
public:
std::map<std::string, std::function<bool(std::string)>> Tests;
std::map<std::string, std::string> short_to_long_param;
std::map<std::string, std::string> param_to_testName;
std::map<std::string, std::string> testName_to_result;
std::map<std::string, std::string> param_to_explanation;
static TestFactory* getInstance();
bool Register(std::string name, std::function<bool(std::string)> func, std::string long_param, std::string short_param);
bool RegisterResult(std::string result_var_name, std::string result);
bool RegisterExplanation(std::string result_var_name, std::string explanation);
};
And this is my example file:
#include "lib.hpp"
#include <iostream>
TEST(testing_test, foo, f, "This is a description of foo"){
try
{
return foo == "example";
}
catch(const std::exception& e)
{
std::cerr << "Exception thrown in testing_test: " << e.what() << '\n';
return false;
}
};
It was working when I built everything together manually, but then I decided to create the static library and compile the example separately, it started segfaulting.
This is what my Makefile looks like (as you can see, I am not a pro Makefile user, please be gentle)
LIBNAME := libtest.a
CXX := g++
BIN := bin
SRC := src
INCLUDE := include
LIB := lib
LIBRARIES := -ltest
EXECUTABLE := example
LIBRARY_SOURCES := $(SRC)/lib.o
EXECUTABLE_SOURCES := examples/example.o
CXXFLAGS := -Wall -Wextra -std=c++17 -ggdb -I$(INCLUDE)
all: $(BIN)/$(EXECUTABLE)
run: clean all
clear
./$(BIN)/$(EXECUTABLE)
$(LIB)/$(LIBNAME): $(LIBRARY_SOURCES)
ar rcs $@ $^
$(BIN)/$(EXECUTABLE): $(LIB)/$(LIBNAME)
$(CXX) $(CXXFLAGS) -I$(INCLUDE) -L$(LIB) $(EXECUTABLE_SOURCES) -o $@ $(LIBRARIES)
clean:
-rm -rf $(BIN)/*
-rm -rf $(LIB)/*
Initially, I had all static methods and variables, and I changed them to a singleton to avoid initialisation and memory issues. However, I am getting a segfault in the Register method when it is trying to access the Test map. Any ideas?
EDIT:
So, I reproduced the compilation manually like so:
g++ -Iinclude -std=c++17 -c src/mxIntegration.cpp -o lib/manual_object_file.o
ar rvs lib/manual_library.a lib/manual_object_file.o
g++ -Iinclude examples/main.cpp lib/manual_object_file.o -o bin/manual_test_linking
The resulting executable is running. I'm going to try to catch any difference in the process now.
|
So, after careful analysis, it seems like the example.o file was not being generated successfully. I basically added them as a requirement for the executable recipe, and updated the clean recipe. I believe the .a file could also be passed as an input to the final g++ command.
LIBNAME := libIntegration.a
CXX := g++
BIN := bin
SRC := src
INCLUDE := include
LIB := lib
LIBRARIES := -l:$(LIBNAME)
EXECUTABLE := main
LIBRARY_SOURCES := $(SRC)/mxIntegration.o
EXECUTABLE_SOURCES := examples/main.o
CXXFLAGS := -Wall -Wextra -std=c++17 -ggdb -I$(INCLUDE)
all: $(BIN)/$(EXECUTABLE)
run: clean all
clear
./$(BIN)/$(EXECUTABLE)
$(LIB)/$(LIBNAME): $(LIBRARY_SOURCES)
ar rvs $@ $^
$(BIN)/$(EXECUTABLE): $(LIB)/$(LIBNAME) $(EXECUTABLE_SOURCES)
$(CXX) $(CXXFLAGS) -L$(LIB) $(EXECUTABLE_SOURCES) -o $@ $(LIBRARIES)
clean:
-rm -rf $(BIN)/*
-rm -rf $(LIB)/*
-rm -rf **/*.o
|
73,730,634
| 73,731,011
|
removing nested paths from vector of strings
|
I have an std::vector<std::string>paths where each entry is a path and I want to remove all the paths that are sub-directories of another one.
If for example I have root/dir1/, root/dir1/sub_dir/ and root/dir2/, the result should be root/dir1/, root/dir2/.
The way I've implemented it is by using std::sort + std::unique with a predicate that checks if string2 starts with string1.
std::vector<std::string> paths = getPaths();
std::sort(paths.begin(), paths.end());
const auto to_erase = std::unique(paths.begin(), paths.end(), [](const std::string & s1, const std::string & s2) {
return (s2.starts_with(s1) || s1.starts_with(s2));
});
paths.erase(to_erase, paths.end());
But since the predicate should be symmetric, I wonder if in some implementation std::unique iterates from end to start, and in that case the result would be wrong.
|
Your predicate is symmetric.
Let p be your predicate (the lambda), and a and b some strings, different from each other, but such that p(a, b) is true. Then either a.starts_with(b) or b.starts_with(a).
If a.starts_with(b), then p(b, a) is true because s2.starts_with(s1) is true in the lambda. Similarly, if b.starts_with(a), then p(b, a) is true because s1.starts_with(s2) is true in the lambda.
So, if p(a, b), then p(b, a) (and vice versa), which is the definition of a symmetric predicate.
It's not transitive though (p("a/b", "a") and p("a", "a/c") but not p("a/b", "a/c")), but I can't see a way this could pose a problem in practice. It could definitely lead to different results if the input isn't sorted, but yours is.
So your implementation is probably fine.
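As a quick self-contained check with the example paths from the question (a sketch; it needs C++20 for starts_with, which the question already uses):
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>
int main()
{
    std::vector<std::string> paths = {"root/dir1/sub_dir/", "root/dir2/", "root/dir1/"};
    std::sort(paths.begin(), paths.end());
    const auto to_erase = std::unique(paths.begin(), paths.end(), [](const std::string& s1, const std::string& s2) {
        return (s2.starts_with(s1) || s1.starts_with(s2));
    });
    paths.erase(to_erase, paths.end());
    for (const auto& p : paths)
        std::cout << p << '\n';   // prints root/dir1/ and root/dir2/
}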
|
73,730,636
| 73,731,842
|
objects that're usable constant expressions
|
I have the following code that demonstrate my problem:
int main(void)
{
const int ci = 42;
constexpr int j = ci;
}
The above program compiles fines. But I'm expecting it to be ill-formed.
First, the initializer 42 is an integral constant expression converted to int via identity conversion; then, the converted expression is a core constant expression: ([const.expr]/9)
An integral constant expression is an expression of integral or
unscoped enumeration type, implicitly converted to a prvalue, where
the converted expression is a core constant expression.
Second, I claim that the expression E = ci is not usable in a constant expression because it's not constant-initialized (i.e., the variable has no static storage duration) even though ci is a potentially-constant variable (i.e., it's a variable of const-qualified integral type). So you can't apply the rule [expr.const]/4 because it requires the object to be potentially-constant as well as constant-initialized:
A constant-initialized potentially-constant variable V is usable in constant expressions at a point P if V's initializing declaration D
is reachable from P and ..
So per my understanding, for variable ci to be constant-initialized, it has to have a static duration as well as a constant-expression initializer.
Assuming my understanding is correct so far, I will continue.
In the initialization of j, an lvalue-to-rvalue conversion is performed on the glvalue ci; but this conversion is applied to a non-volatile glvalue that refers to an object ci that is not usable in constant expressions. So I'm expecting the program to be ill-formed because the expression E evaluates an lvalue-to-rvalue conversion and neither rule in [expr.const]/(5.9) permits it. Am I correct? What I'm missing/conflating here? Am I missing any wording?
As a sidenote, in C++17, the rule regarding this point is more restricted and clear at least for me:
an lvalue-to-rvalue conversion unless it is applied to
(2.7.1) a non-volatile glvalue of integral or enumeration type that refers to a complete non-volatile const object with a preceding
initialization, initialized with a constant expression [..]
It's definitely clear to me that the wording is applied here and (2.7.1) is satisfied. But the wording regarding this point specifically is changed in C++20: The term usable "usable in constant expressions" appears since C++20 per my search. Note that, if possible, I need a C++20 answer.
|
By the definition of "constant-initialized" ([expr.const]/2), the following is constant-initialized because 42 is a constant expression.
int ci = 42;
By the definition of "potentially-constant" ([expr.const]/3), the following is potentially-constant because the variable is a const-qualified integral type.
const int ci;
If you have both of these at function scope, then you have a variable that is usable in constant expressions from the point of definition until the end of the variable's scope (special case of [expr.const]/4).
|
73,731,493
| 73,731,571
|
Why does virtual not call the overridden function here?
|
#include <iostream>
struct MemA { virtual void tellClass() { std::cout << "I am member of class A" << std::endl; } };
struct MemB : public MemA { void tellClass() { std::cout << "I am member of class B" << std::endl; } };
class A {
MemA *current;
public:
A() : current(new MemA()) {}
void getMemClass() { current->tellClass(); }
~A() { delete current; }
};
class B : public A {
MemB *current;
public:
B() : A(), current(new MemB()) {}
~B() { delete current; }
};
int main() { B().getMemClass(); }
In the above program I have declared the tellClass() function as virtual, which means it should decide which function to call at runtime. Despite that, it is printing "I am member of class A" even though I am calling getMemClass() from a class B object, which has hidden the MemA *current with MemB *current during inheritance.
How do I get this to work properly?
|
current in A is a different member from the current in B, i.e. there are two members A::current and B::current. B has both of them as member.
The former is only hidden insofar as naming current unqualified in the context of class B refers to B::current instead of A::current. In the context of class A (where you are evaluating the call current->tellClass();) the unqualified name still refers to the A::current member.
Data members in C++ cannot be overridden in the same sense as (virtual) member functions can.
The A::current member of your B object is pointing to a MemA complete object, not a MemB object.
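As for how to get it to work: one possible design (a sketch; it assumes the intent is that B should supply the MemB) is to keep a single owning pointer in A and let B hand it a MemB through a protected constructor:
#include <iostream>
#include <memory>
struct MemA { virtual ~MemA() = default;
              virtual void tellClass() { std::cout << "I am member of class A" << std::endl; } };
struct MemB : public MemA { void tellClass() override { std::cout << "I am member of class B" << std::endl; } };
class A {
    std::unique_ptr<MemA> current;          // the only pointer; no second member in B
protected:
    explicit A(std::unique_ptr<MemA> m) : current(std::move(m)) {}
public:
    A() : current(std::make_unique<MemA>()) {}
    void getMemClass() { current->tellClass(); }
};
class B : public A {
public:
    B() : A(std::make_unique<MemB>()) {}    // the base pointer now points at a MemB
};
int main() { B().getMemClass(); }           // prints "I am member of class B"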
|
73,731,776
| 73,731,915
|
How to initialize the array-like member variable in the constructor?
|
How to initialize the array-like member variable?
The visual studio code says:
no matching function for call to 'Node::Node()' gcc line 12 col 9
const int N = 100;
struct Node {
int val, ch[2];
/**
void init(int _val) {
this->val = _val, this->ch[0] = this->ch[1] = 0;
}*/
Node (int _val): val(_val) {
this->ch[0] = this->ch[1] = 0;
}
} tree[N]; // <--------- this is line 12
int main() {
int a;
cin >> a;
tree[0] = Node(a);
}
|
The problem is that when you wrote tree[N] you're creating an array whose elements will be default constructed but since there is no default constructor for your class Node, we get the mentioned error.
Also, Node doesn't have a default constructor because you've provided a converting constructor Node::Node(int), so the compiler will not automatically synthesize the default ctor Node::Node().
To solve this you can add a default ctor Node::Node() for your class.
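A minimal sketch of that fix (keeping the existing converting constructor):
const int N = 100;
struct Node {
    int val, ch[2];
    Node() : val(0), ch{0, 0} {}             // default ctor so tree[N] can be built
    Node(int _val) : val(_val), ch{0, 0} {}
} tree[N];
int main() {
    tree[0] = Node(42);
}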
|
73,732,145
| 73,732,292
|
std::function template with multiple template parameters
|
When looking for documentation on std::function, I found several pages, that list two implementations of std::function for C++11:
https://en.cppreference.com/w/cpp/utility/functional/function
template< class >
class function; /* undefined */
template< class R, class... Args >
class function<R(Args...)>;
https://www.tutorialspoint.com/cpp_standard_library/functional.htm,
template <class T> function; // undefined
template <class Ret, class... Args> class function<Ret(Args...)>;
https://cplusplus.com/reference/functional/function/
template<class >
class function;
and
template< class R, class... Args >
class function<R(Args...)>
I tried to use the multi-parameter version of the function<>-template, but my code does not compile, neither with visual c++ 2017, nor with XCode or g++. Here is my sample code:
#include <functional>
int main(int argc, char*argv[])
{
std::function<void, int> cb;
}
All compilers complain on std::function taking only a single template parameter.
Can anybody explain this?
|
that list two implementations of std::function for C++11:
No they don't. That isn't what they're showing at all.
template< class >
class function; /* undefined */
is the base template, which (as it says), is never defined.
For example, std::function<int> would never make sense, so there is simply no template defined that could match that pattern.
template< class R, class... Args >
class function<R(Args...)>;
is a partial specialization.
That is, std::function<T> is only defined at all when T has the form R(Args...), meaning T is the type of a function returning R and taking the arguments Args....
Hence your attempt should be std::function<void(int)> cb; ... exactly as shown in the extensive examples at the bottom of the cppreference page you linked.
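A minimal sketch of the corrected declaration in use:
#include <functional>
#include <iostream>
int main()
{
    std::function<void(int)> cb = [](int x) { std::cout << x << '\n'; };
    cb(42);
}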
|
73,732,857
| 73,732,910
|
C++: 2D Dynamic Arrays, outputting all values in one line
|
so I'm starting to write a program that multiplies two square matrices using dynamic 2D arrays. I'm just learning how dynamic arrays work, so I'm testing to make sure everything is storing properly.
When I run my code, it outputs the two matrices on a single line each, rather than like a matrix with rows and columns. How do I fix this?
#include <iomanip>
#include <iostream>
#include <array>
using namespace std;
int main()
{
int **C, n, m; //pointer, rows, columns for matrix 1;
int **D, p, q; //pointer, rows, columns for matrix 2;
cout << "Enter the dimensions of your matrices: ";
cin >> n >> m;
p = n;
q = m;
cout << endl;
C = new int *[n];
D = new int *[p];
for (int x=0 ; x < n; x++)
{
C[x] = new int [m];
}
for (int x=0 ; x < p; x++)
{
D[x] = new int [q];
}
cout << "Enter the values of your first matrix: ";
for (int I=0 ; I < n; I++ )
{
for (int K=0 ; K < m; K++)
cin >> C[I][K];
}
cout << "Enter the values of your second matrix: ";
for (int L=0 ; L < p; L++ )
{
for (int Z=0 ; Z < q; Z++)
cin >> D[L][Z];
}
for (int I=0 ; I < n; I++ )
{
for (int K=0 ; K < m; K++)
cout << setw(4)<< C[I][K];
}
cout << endl;
for (int L=0 ; L < p; L++ )
{
for (int Z=0 ; Z < q; Z++)
cout << setw(4)<< D[L][Z];
}
cout << endl;
}
|
Just add one more statement
cout << endl;
in your for loops like
for (int I=0 ; I < n; I++ )
{
for (int K=0 ; K < m; K++)
cout << setw(4)<< C[I][K];
cout << endl;
}
cout << endl;
for (int L=0 ; L < p; L++ )
{
for (int Z=0 ; Z < q; Z++)
cout << setw(4)<< D[L][Z];
cout << endl;
}
cout << endl;
|
73,733,203
| 73,777,523
|
How to read protobuf FileOptions in C++?
|
In the Google proto3 examples they show both global and nested custom options, including:
extend google.protobuf.FileOptions {
string my_file_option = 1001;
}
option (my_file_option) = "hello file!";
and
extend google.protobuf.MessageOptions {
optional string my_option = 51234;
}
message MyMessage {
option (my_option) = "Hello world!";
}
The example code for C++ shows how to read the MyMessage.my_option field using GetExtension on the Message object, but not how to read the global option "my_file_option".
So in C++, how would I read the contents of "my_file_option"?
|
To get the proto's options rather than a Message, you need to load the proto from the DescriptorPool
// Create descriptor pool of all loaded protos
google::protobuf::DescriptorPool descriptorPool(google::protobuf::DescriptorPool::generated_pool());
// Find specific proto file you want the options from
const google::protobuf::FileDescriptor *file = descriptorPool.FindFileByName("my_proto.proto");
// Fetch the options from the proto's FileDescriptor
const google::protobuf::FileOptions options = file->options();
// Get the contents of the specific extension (can also search by name)
std::string service = options.GetExtension(my_proto::my_file_option);
service will contain "hello file!"
|
73,733,691
| 73,739,057
|
How to manually override the automatic quotation of strings
|
I am writing to a YAML file with jbeders/yaml-cpp and I am writing IP addresses to a file. When I write the wildcard IP "*" to the file, it automatically gets quoted (since '*' is a special character in YAML). But when I want to write the IP 10.0.1.1, it does not get quoted.
This is how I assign the node for the asterix:
ip_map["ip"] = "*";
This is how I assign the node for the numerical IP:
ip_map["ip"] = "10.0.0.1";
This is the resulting file that gets emitted with defaults (yaml_out << ip_map;)
ip: "*"
ip: 10.0.1.1
I have tried setting the emitter format option like this:
YAML::Emitter yaml_out;
yaml_out.SetStringFormat(YAML::DoubleQuoted);
... but this seems to double quote everything like this:
"ip": "*"
"ip": "10.0.1.1"
How do I consistently double quote all string values and not the keys or other numerical/boolean values?
EDIT:
I dumbed down the question a little and used literals instead of a variable.
Let's say I wanted instead to have a node like this:
fileA:
original_ip: "10.0.0.1"
Which I then read in with doc = YAML::LoadFile("fileA");. I then use the value from that file and try assign it to the original ip_map["ip"], however I want to force double quotes around the IP address.
So the full snippet would look like this:
ip_map["ip"] = doc["original"].as<std::string>();
How do I force the ip_map node's assigned string value (10.0.0.1) to be emitted with double quotes?
|
You should edit your question for conciseness, such as 'Using the yaml-cpp, how to serialize a map not quoting its keys but quoting its values?'.
To the point, you should manually iterate a map alternating string formats like the following.
yaml_out << YAML::BeginMap;
for (auto p : ip_map) {
yaml_out << p.first;
yaml_out.SetStringFormat(YAML::DoubleQuoted);
yaml_out << p.second;
yaml_out.SetStringFormat(YAML::Auto);
}
yaml_out << YAML::EndMap;
|
73,734,068
| 73,735,251
|
Compiler ignores my if statement in while(true)
|
here's my code for sending messages every 3 seconds for 10 times.
but it ignores all of if statements in while(true)
double current;
double freq;
QueryPerformanceFrequency((LARGE_INTEGER*)&freq);
QueryPerformanceCounter((LARGE_INTEGER*)&current);
float totalTime = 0.f;
float counter = 0.f;
while (true)
{
double previous = current;
QueryPerformanceCounter((LARGE_INTEGER*)&current);
double deltaTime = (current - previous) / freq;
totalTime += deltaTime;
counter += deltaTime;
if (counter > 3.f)
{
cout << "msg Sent" << totalTime << "\n";
counter = 0;
}
if (totalTime > 30.f)
{
break;
}
}
weird thing is that if I print out two of those values in the middle,
it works fine.
double current;
double freq;
QueryPerformanceFrequency((LARGE_INTEGER*)&freq);
QueryPerformanceCounter((LARGE_INTEGER*)&current);
float totalTime = 0.f;
float counter = 0.f;
while (true)
{
double previous = current;
QueryPerformanceCounter((LARGE_INTEGER*)&current);
double deltaTime = (current - previous) / freq;
totalTime += deltaTime;
counter += deltaTime;
cout << totalTime << "\n";
cout << counter << "\n";
if (counter > 3.f)
{
cout << "msg Sent" << totalTime << "\n";
counter = 0;
}
if (totalTime > 30.f)
{
break;
}
}
I think it happened because of compiler optimization. if I'm right, is there any way to stop compiler messing up my code?
LARGE_INTEGER current;
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&current);
float totalTime = 0.f;
float counter = 0.f;
while (true)
{
LARGE_INTEGER previous = current;
QueryPerformanceCounter(&current);
double deltaTime = (current.QuadPart - previous.QuadPart) / static_cast<double>(freq.QuadPart);
totalTime += deltaTime;
counter += deltaTime;
if (counter > 3.f)
{
cout << "msg Sent" << totalTime << "\n";
counter = 0;
}
if (totalTime > 30.f)
{
break;
}
}
This time I followed c++ rule and the result is same. So it's not the trouble of casting. and it also works when i put two prints in the middle.
|
It is likely that the problem is due to deltaTime being significantly smaller than 3.f and 30.f.
Since, unless either if branch is taken, the loop body doesn't do anything except measure a clock, the time between iterations is likely to be very small. Initially, this won't be a problem and deltaTime will be correctly added to counter and totalTime. However, as these variables increase in size, they will eventually become so large that deltaTime will fall below the precision provided by double. When this happens, adding deltaTime will cease to increase these variables as the result will round out to exactly the original value.
This is made worse by using float to represent counter and totalTime, which is the floating point type with the lowest precision, meaning the problem will appear at larger values of deltaTime than if a more precise floating point type were used.
By adding several std::cout the loop body may be slowed enough that deltaTime is always large enough to be meaningful relative to counter and totalTime.
Instead of counting time using imprecise floating point types, use integer types when possible to avoid this problem. Based on freq you can calculate how many ticks are equivalent to your 3.f and 30.f constants. You can then just count ticks and compare that count to the calculated limits.
|
73,734,381
| 73,747,412
|
accessing class member regardless of it being a function or a data member
|
I want to write a generic accessor for a class member regardless whether it is a function or or a data member:
#include <type_traits>
namespace traits
{
template <typename T, typename = void>
struct _name;
template <typename T>
struct _name<T, std::void_t<decltype(std::declval<T>().name)>>
{
constexpr decltype(auto) operator()(const T &t) const
{
return t.name;
}
};
template <typename T>
struct _name<T, std::void_t<decltype(std::declval<T>().name())>>
{
constexpr decltype(auto) operator()(const T &t) const
{
return t.name();
}
};
template <typename T>
decltype(auto) name(const T &t)
{
return _name<T>{}(t);
}
}
#include <string>
struct beb
{
std::string name = "beb";
};
struct bob
{
std::string name() const
{
return "bob";
}
};
#include <iostream>
int main()
{
std::cout << traits::name(bob());
std::cout << traits::name(beb());
}
I am using SFINAE with void_t specialization, but it works with only single specialization. Otherwise it gives an error saying error C2953: 'traits::_name<T,void>': class template has already been defined.
MSVC latest: https://godbolt.org/z/h9WT58z8P - does not compile
GCC 12.2: https://godbolt.org/z/vc3K1M7x5 - compiles
Clang 15.0.0: https://godbolt.org/z/dqGEMfYWK - does not compile
Should this code compile? (It compiles only for GCC)
How to make it compilable?
_name is a customization point within a traits namespace. By default it accesses name using name or name(). But one can provide a specialization to use, for example getName().
|
You seem to be reinventing std::invoke. This function embodies the definition of Callable concept, and that definition has two special cases:
a pointer to data member is "callable" like a function taking the object as its single parameter: std::invoke(&C::dataMember, obj) is equivalent to obj.*dataMember
a pointer to member function is "callable" like a function taking the object as its first parameter: std::invoke(&C::memFun, obj, a, b, c) is equivalent to (obj.*memFun)(a, b, c)
Putting this together, your name can be implemented simply as
template <typename T>
decltype(auto) name(const T &t)
{
return std::invoke(&T::name, t);
}
It will do the right thing whether name is a data member or a member function. Demo
If you want _name as a customization point, just add an extra indirection: make name call _name, and the default implementation of _name call std::invoke.
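A sketch of that indirection, keeping _name in the traits namespace as the customization point (supporting, say, a getName()-style type would then be a user-provided specialization of _name):
#include <functional>
#include <iostream>
#include <string>
namespace traits
{
    // default implementation: works for both data members and member functions via std::invoke
    template <typename T>
    struct _name
    {
        decltype(auto) operator()(const T& t) const
        {
            return std::invoke(&T::name, t);
        }
    };
    template <typename T>
    decltype(auto) name(const T& t)
    {
        return _name<T>{}(t);   // users may specialize _name for their own types
    }
}
struct beb { std::string name = "beb"; };
struct bob { std::string name() const { return "bob"; } };
int main()
{
    std::cout << traits::name(bob()) << '\n';
    std::cout << traits::name(beb()) << '\n';
}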
|
73,734,573
| 73,734,888
|
Why can logical constness only be added to a std::span of const pointers?
|
Consider this code that attempts to create various std::span objects for a vector of raw pointers.
#include <vector>
#include <span>
int main()
{
struct S {};
std::vector<S*> v;
std::span<S*> span1{v};
std::span<S* const> span2{v};
std::span<const S* const> span3{v};
std::span<const S*> span4{v};
return 0;
}
span3 compiles fine, but span4 fails with the following error:
<source>: In function 'int main()':
<source>:58:32: error: no matching function for call to 'std::span<const main()::S*>::span(<brace-enclosed initializer list>)'
58 | std::span<const S*> span4{v};
| ^
In file included from /opt/compiler-explorer/gcc-12.2.0/include/c++/12.2.0/ranges:45,
from <source>:5:
/opt/compiler-explorer/gcc-12.2.0/include/c++/12.2.0/span:231:9: note: candidate: 'template<class _OType, long unsigned int _OExtent> requires (_Extent == std::dynamic_extent || _OExtent == std::dynamic_extent || _Extent == _OExtent) && (std::__is_array_convertible<_Type, _Tp>::value) constexpr std::span<_Type, _Extent>::span(const std::span<_OType, _OExtent>&) [with long unsigned int _OExtent = _OType; _Type = const main()::S*; long unsigned int _Extent = 18446744073709551615]'
I would have expected span3 and span4 either both to fail or both to succeed. Can someone explain why logical constness can be added to a std::span of raw pointers iff the underlying pointer is const (i.e. bitwise)?
|
std::span<const S*> allows you to assign a const S* to an element.
std::vector<S*> allows you to read an element of type S*.
If std::span<const S*> were allowed to take a std::vector<S*>, then it would be possible to sneakily convert a const S* to a S*, by assigning the const S* to an element of the span and then reading the same element through the vector.
That is to say, if std::span<const S*> span4{v}; were allowed, then the following program would be valid:
#include <vector>
#include <span>
int main()
{
struct S { int value; };
std::vector<S*> v = {nullptr};
std::span<const S*> span4{v}; // Note: not allowed in reality
const S s{.value = 0};
span4[0] = &s;
S* p = v[0];
assert(p == &s);
p->value = 42; // Oops, modifies the value of a const object!
}
So in order to prevent this scenario and provide const-correctness, std::span<const S*> must not be constructible from a std::vector<S*>.
On the other hand, std::span<const S* const> does not allow you to assign to its elements, so it's safe for it to take a std::vector<S*>.
(Yes, it's the same reason that you can convert a S** to const S* const *, but you cannot convert it to const S**.)
|
73,734,711
| 73,735,263
|
Isn't this code redundant taking into account C++ memory management?
|
I'm a C++ newbie, so I don't truly understand how C++ manages memory. Isn't this code redundant?
void processData()
{
FILE* savedDataFile;
char* savedData;
try {
savedDataFile = fopen("../savedData.dump", "r");
if (!savedDataFile)
throw 0;
savedData = (char*)malloc(0xf000);
fread(savedData, 1, 0xf000, savedDataFile);
fclose(savedDataFile);
free(savedDataFile); // Do I really need this?
}
catch (int e) {
try {
fclose(savedDataFile); // Do I really need this?
}
catch (int e) {
}
free(savedDataFile); // Do I really need this?
free(savedData); // Do I really need this?
fprintf(stderr, "Failed to load saved data from the previous dump.\n");
exit(EXIT_FAILURE);
}
// Data processing... it may take a while.
free(savedData); // Do I really need this? Taking into account that this's actually the end of the program, there's only returning of EXIT_SUCCESS in the main function after it.
}
int main()
{
// Some work... then the last function's called:
processData();
return EXIT_SUCCESS;
}
I really don't like redundant code, so I want to make my code free from it. Thank you in advance for help.
|
Any FILE* pointer that is fopen()'d must be fclose()'d. Any dynamic memory that is malloc()'d must be free()'d.
So yes, in general, you need those calls, BUT only when used correctly! Which this code is not doing.
If fopen() fails, an int is thrown, and then the exception handler is exhibiting all kinds of bad/illegal behavior because it is:
using a secondary try..catch that will never be triggered.
calling fclose() on a NULL FILE*, which is undefined behavior.
calling free() on a NULL FILE*, which is OK in this case only because free(NULL) is well-defined behavior, but is wrong in general since FILE* pointers are not guaranteed to be malloc()'d to begin with. The actual allocation used is a private implementation detail of fopen(), and fclose() will handle the deallocation appropriately.
calling free() on an uninitialized char* pointer, which is undefined behavior.
Even if fopen() were successful, there are still problems, as the code is:
not checking to make sure that malloc() is successful before passing the char* pointer to fread().
calling free() on a FILE* pointer after fclose()'ing the same pointer.
The correct code could look more like this instead:
#include <stdio.h>
#include <stdlib.h>
int processData()
{
FILE* savedDataFile;
char* savedData;
size_t savedDataSize;
try {
savedDataFile = fopen("../savedData.dump", "r");
if (!savedDataFile)
throw 0; // jumps to the 'catch' that exits the function, nothing to free or close...
try {
savedData = (char*)malloc(0xf000);
if (!savedData)
throw 1; // jumps to the 'catch' that closes the file, nothing to free...
try {
savedDataSize = fread(savedData, 1, 0xf000, savedDataFile);
if (ferror(savedDataFile))
throw 2; // jumps to the 'catch' that frees the memory...
}
catch (int) {
free(savedData);
throw; // jumps to the 'catch' that closes the file...
}
}
catch (int) {
fclose(savedDataFile);
throw; // jumps to the 'catch' that exits the function...
}
}
catch (int) {
fprintf(stderr, "Failed to load saved data from the previous dump.\n");
return EXIT_FAILURE;
}
fclose(savedDataFile);
// Data processing... it may take a while.
free(savedData);
return EXIT_SUCCESS;
}
int main()
{
// Some work... then the last function's called:
return processData();
}
Which is obviously quite ugly and error-prone. You can greatly clean this up by using C++ semantics instead of C semantics, letting the compiler and standard library handle all of the memory management and cleanup for you:
#include <iostream>
#include <fstream>
#include <vector>
#include <stdexcept>
int processData()
{
std::vector<char> savedData;
try {
std::ifstream savedDataFile;
savedDataFile.exceptions(std::ifstream::failbit); // tell the stream to throw if something fails...
savedDataFile.open("../savedData.dump"); // throws if fails to open the file...
savedData.resize(0xf000); // throws if fails to allocate memory...
std::streamsize numRead = savedDataFile.readsome(savedData.data(), savedData.size()); // throws if fails to read from the file...
savedData.resize(numRead); // may shrink the size, but won't throw...
} // <-- file is closed here
catch (const std::exception &) {
std::cerr << "Failed to load saved data from the previous dump.\n";
return EXIT_FAILURE;
}
// Data processing... it may take a while.
return EXIT_SUCCESS;
} // <-- vector memory is freed here on return
int main()
{
// Some work... then the last function's called:
return processData();
}
|
73,735,234
| 73,736,741
|
I have written this code to convert an infix expression to a postfix expression using Stacks in CPP
|
#include<bits/stdc++.h>
using namespace std;
int prec(char c){
if(c=='^'){
return 3;
}else if(c=='*' || c=='/'){
return 2;
}else if(c=='+' || c=='-'){
return 1;
}
return -1;
}
string infixToPostfix(string );
int main(){
string s = "(a-b/c)*(a/k-l)";
cout<<infixToPostfix(s);
return 0;
}
string infixToPostfix(string s){
stack<char> st;
string res = "";
for(int i=0;i<s.length();i++){
if((s[i]>='a' && s[i]<='z') || (s[i]>='A' && s[i]<='Z')){
res+=s[i];
}else if(s[i]=='('){
st.push(s[i]);
}else if(s[i]==')'){
while((!st.empty()) && st.top()!='('){
res+=st.top();
st.pop();
}
if(!st.empty()){
st.pop();
}
}else{
while((!st.empty()) && prec(st.top())>prec(s[i])){
res+=st.top();
st.pop();
}
st.push(s[i]);
}
while(!st.empty()){
res+=st.top();
st.pop();
}
}
return res;
}
As you can see I'm trying to convert infix notation to postfix notation but the output is not coming as expected.
I couldn't even find any syntax error so there's a high chance that there is some logical error.
Expected Output:
abc/-ak/l-*
Actual Output:
(a-b/c*(a/k-l
I have blown my brain off trying to find the error and I still haven't. Please help me solve the issue.
|
Define two precedence tables: outstack for operators when they are outside the stack and instack for operators when they are inside the stack.
If an operator is left-to-right associative, increase its precedence from outstack to instack. If it is right-to-left associative, decrease it.
Op  | outstack pre | instack pre
----+--------------+------------
+ - |      1       |      2
* / |      3       |      4
^   |      6       |      5
(   |      7       |      0
)   |      0       |      x
The program below uses this logic to convert an infix expression to postfix expression.
#include <iostream>
#include <string>
#include <stack>
#include <map>
#include <vector>
// For left to right associative operator, precedence increase from out to in
// For right to left associative operator (like ^), precedence decrease from out to in
// Outside stack precedence
std::map<char, int> precedenceOutStack {
{'(', 7},
{')', 0},
{'*', 3},
{'/', 3},
{'+', 1},
{'-', 1},
{'^', 6},
};
// Inside stack precedence
std::map<char, int> precedenceInStack {
{'(', 0},
{'*', 4},
{'/', 4},
{'+', 2},
{'-', 2},
{'^', 5},
};
int getOutPrecedence(char c) {
if(precedenceOutStack.count(c) > 0) {
return precedenceOutStack[c];
} else {
return 0;
}
}
int getInPrecedence(char c) {
if(precedenceInStack.count(c) > 0) {
return precedenceInStack[c];
} else {
return 0;
}
}
std::string infixToPostfix(std::string infix) {
std::string postfix {};
std::stack<char> stk;
size_t i {};
// loop through the input string
while(infix[i]) {
// if its an operand add it to postfix
if(std::isalpha(infix[i])) {
postfix.push_back(infix[i++]);
} else {
if(!stk.empty()) {
auto outPrec = getOutPrecedence(infix[i]);
auto inPrec = getInPrecedence(stk.top());
// check the out precedence of input char with in precedence of stack top
// if its greater push the operator to stack
if( outPrec > inPrec ) {
stk.push(infix[i++]);
}
// else if it is less, append the operator from top of stack to postfix
else if(outPrec < inPrec ) {
postfix.push_back(stk.top());
stk.pop();
}
// only '(' and ')' has equal out and in precedence, ignore them
else if(outPrec == inPrec) {
stk.pop();
++i;
}
} else {
stk.push(infix[i++]);
}
}
}
    // pop out remaining operators from the stack and append them to postfix
while(!stk.empty()) {
postfix.push_back(stk.top());
stk.pop();
}
return postfix;
}
int main()
{
std::vector<std::string> inputs {
"(a-b/c)*(a/k-l)" // abc/-ak/l-*
};
for(const auto& s : inputs) {
std::cout << "Infix: " << s << " -- ";
std::cout << infixToPostfix(s) << std::endl;
}
}
|
73,735,387
| 73,735,533
|
I am not able to print the output on the screen. I am using the cppreference site compiler (GCC 12.1, C++20)
|
I am not able to print the output on the screen. I am using the cppreference site compiler (GCC 12.1, C++20). Is there any deadlock situation in the example below? Is there any online compiler I can use for trying this type of example?
#include <iostream>
#include <semaphore>
#include <thread>
#include <vector>
std::vector<int> myVec{};
std::counting_semaphore<1> prepareSignal(2); // (1)
void prepareWork()
{
myVec.insert(myVec.end(), {0, 1, 0, 3});
std::cout << "Sender: Data prepared." << '\n';
prepareSignal.release(); // (2)
}
void completeWork()
{
std::cout << "Waiter: Waiting for data." << '\n';
prepareSignal.acquire(); // (3)
myVec[2] = 2;
std::cout << "Waiter: Complete the work." << '\n';
for (auto i: myVec) std::cout << i << " ";
std::cout << '\n';
}
int main()
{
std::cout << '\n';
std::thread t1(prepareWork);
std::thread t3(completeWork);
std::thread t2(completeWork);
t1.join();
t3.join();
t2.join();
std::cout << '\n';
}
|
There is no deadlock, but you have a race condition:
In completeWork, prepareSignal.acquire does not block the execution (per the C++ documentation: "When the counter is zero, acquire() blocks until the counter is incremented"). In this case, the counter is initialized to 2, so both acquire() calls succeed immediately without ever waiting for the release() in prepareWork.
So the program may run completeWork earlier than prepareWork, which will execute myVec[2] = 2;. This causes undefined behavior since your vector is empty.
You can test such errors using Address Sanitizer.
See a live example here that highlights this issue by adding a 1 second sleep delay to prepareWork:
https://godbolt.org/z/YYsfq84bd
The address sanitizer output tells you exactly where the error is!
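A minimal sketch of one way to fix the ordering (assumption: the waiters are supposed to block until the data is prepared) is to start the counter at 0 and release once per waiter. Note this alone does not make the two completeWork threads safe against each other; they still modify and print myVec concurrently:
// Start at 0 so acquire() really blocks until prepareWork signals.
std::counting_semaphore<2> prepareSignal(0); // (1)
void prepareWork()
{
    myVec.insert(myVec.end(), {0, 1, 0, 3});
    std::cout << "Sender: Data prepared." << '\n';
    prepareSignal.release(2); // (2) wake both waiting threads
}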
|
73,735,733
| 73,735,970
|
std::from_chars overload for wchar_t?
|
Is there any reason why std::from_chars doesn't have an overload for wchar_t?
Currently, there are only four overloads one of them being:
constexpr std::from_chars_result from_chars( const char* first, const char* last,
/*see below*/& value, int base = 10 );
So what is the reason that there doesn't exist any overload for wchar_t or other character types? Is there any chance that they will be added in C++26? If not then are there equivalents for wchar_t?
|
The from/to_chars series of functions are for elementary string conversions. These are the most elemental of numeric conversions. As such, they only support the most basic encoding: your system's native narrow character set (usually Unicode codepoints less than 128 encoded as UTF-8 or ASCII).
If you have text in some other encoding, it is up to you to convert that encoding from/to the system narrow encoding in order to use these functions.
The expected use cases for these strings are for things like writing to/reading from textual formats like JSON. Such files are almost universally UTF-8 encoded (or ASCII), and when they call for numbers, their numbers are both locale-independent (always using . for the radix mark, for example) and encodes numbers using only the ASCII-part of Unicode.
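A minimal sketch of the "convert first, then parse" approach, assuming the wide text holds only ASCII digits (so a naive narrowing copy is enough; anything else needs a real encoding conversion):
#include <charconv>
#include <iostream>
#include <string>
int main() {
    std::wstring wide = L"12345";
    // Naive narrowing: fine for ASCII digits only.
    std::string narrow(wide.begin(), wide.end());
    int value = 0;
    auto result = std::from_chars(narrow.data(), narrow.data() + narrow.size(), value);
    if (result.ec == std::errc{})
        std::cout << value << '\n'; // prints 12345
}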
|
73,736,040
| 73,736,061
|
multiple definition of... + undefined reference to... the same function
|
My program has various errors that I don't quite understand.
In geraet.cpp I want to override the method schalten() from elektronik.cpp, but after compiling each component I can't link them (g++ -o main main.o elektronik.o geraet.o) with these errors:
/usr/bin/ld: geraet.o: in function `schalten()':
geraet.cpp:(.text+0x0): multiple definition of `schalten()'; elektronik.o:elektronik.cpp:(.text+0x0): first defined here
/usr/bin/ld: main.o: in function `main':
main.cpp:(.text+0x23): undefined reference to `Geraet::schalten()'
collect2: error: ld returned 1 exit status
my intention for the first error was to make the method virtual in elektronik.h, but that makes the error even worse:
/usr/bin/ld: geraet.o: in function `schalten()':
geraet.cpp:(.text+0x0): multiple definition of `schalten()'; elektronik.o:elektronik.cpp:(.text+0x0): first defined here
/usr/bin/ld: main.o: warning: relocation against `_ZTV6Geraet' in read-only section `.text'
/usr/bin/ld: main.o: in function `main':
main.cpp:(.text+0x1e): undefined reference to `vtable for Geraet'
/usr/bin/ld: main.cpp:(.text+0x2e): undefined reference to `Geraet::schalten()'
/usr/bin/ld: warning: creating DT_TEXTREL in a PIE
collect2: error: ld returned 1 exit status
Can someone please explain ...
...why I can't simply override the method in the child class?
...why I can't solve this problem with a virtual method?
...why the method is undefined when linking the objects?
Here my code:
main.cpp
#include "elektronik.h"
#include "geraet.h"
#include <iostream>
int main() {
Geraet tool{};
std::cout << tool.schalten() << std::endl;
return 0;
}
elektronik.h
#pragma once
class Elektronik {
public:
bool schalten();
};
elektronik.cpp
#include "elektronik.h"
bool schalten() {
return false;
}
geraet.h
#pragma once
class Geraet : public Elektronik {
public:
bool schalten();
};
geraet.cpp
#include "geraet.h"
bool schalten() {
return true;
}
|
When you define a member function in a .cpp file, you need to use the fully qualified (prefixed) name, e.g.:
#include "elektronik.h"
bool Elektronik::schalten() {
return false;
}
Same for Geraet. This is because, without the prefix, the compiler treats it as a free function (in the enclosing namespace), not a definition of the member function.
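For completeness, the analogous fix in the other translation unit looks like this:
// geraet.cpp
#include "geraet.h"
bool Geraet::schalten() { // qualified with Geraet:: so it defines the member function
    return true;
}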
|
73,736,064
| 73,745,367
|
Get a pixel color on directx11 from the screen
|
I need to do function like:
RGBTRIPLE GetPixelColor(int x, int y)
to get a color on a single pixel on directx 11 from the actual frame in my screen
For the moment I have this code:
//For each Call to Present() do the following:
//Get Device
ID3D11Device* device;
HRESULT gd = pSwapChain->GetDevice(__uuidof(ID3D11Device), (void**)&device);
assert(gd == S_OK);
//Get context
ID3D11DeviceContext* context;
device->GetImmediateContext(&context);
//get back buffer
ID3D11Texture2D* backbufferTex;
HRESULT gb = pSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&backbufferTex);
assert(gb == S_OK);
I'm not an expert on DirectX.
I am asking how I can get the pixel color at coordinate x,y from the buffer, the context, or something else.
Can you help me please ?
Thanks !
|
There are several ways to do that, first one, you read the entire texture back to memory and pick your pixel from there (Pseudo code) :
create another ID3D11Texture2D same size/format as backbufferTex (with no bind flags, read cpu access and staging usage.
Use CopyResource to copy your back buffer to the staging texture.
Use Map (with read flag) on the staging texture to have access to it in memory
You now have a D3D11_MAPPED_SUBRESOURCE that contains a pointer to your image data (beware of the RowPitch parameter when trying to locate pixel).
This is rather simple, but it requires downloading the whole texture from VRAM to RAM, which is rather inefficient if you want to do this only once (you also have to handle the texture format properly; it can be rgb/bgr, 10-bit, or 16-bit floats in some cases). A sketch of this approach is shown below.
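A rough sketch of that first approach (error handling omitted; this assumes a 4-bytes-per-pixel back buffer format such as DXGI_FORMAT_R8G8B8A8_UNORM or B8G8R8A8_UNORM, and reuses the device, context and backbufferTex variables from the question):
D3D11_TEXTURE2D_DESC desc;
backbufferTex->GetDesc(&desc);
desc.Usage = D3D11_USAGE_STAGING;
desc.BindFlags = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
desc.MiscFlags = 0;
ID3D11Texture2D* staging = nullptr;
device->CreateTexture2D(&desc, nullptr, &staging);
context->CopyResource(staging, backbufferTex);
D3D11_MAPPED_SUBRESOURCE mapped;
context->Map(staging, 0, D3D11_MAP_READ, 0, &mapped);
// RowPitch is the byte stride of one row; 4 bytes per pixel assumed here.
const unsigned char* row = static_cast<const unsigned char*>(mapped.pData) + y * mapped.RowPitch;
const unsigned char* px = row + x * 4; // channel order depends on the swap chain format
// ... copy px[0..3] into your RGBTRIPLE before unmapping ...
context->Unmap(staging, 0);
staging->Release();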
Another version is to use a compute shader to go pick that single pixel and store it in a GPU buffer (3 or 4 floats), then only download back that single pixel.
This is the process for it:
Create a shader resource view for backbufferTex (to allow compute shader read access)
Create a constant buffer (16 bytes), to allow to upload the coordinates of the pixel you want to read.
Create a compute writeable structured buffer (16 bytes strides, 1 element)
Create a UnorderedAccessView for this buffer.
Create a staging buffer (16 bytes), read access.
Create compute shader to access pixel data (code below).
Attach your constant buffer/texture as input, UnorderedAccessView as output of compute pipeline
Call Dispatch (1,1,1)
Copy writeable structured buffer to staging buffer
Map staging buffer, which contains 4 floats as your pixel data (normalized from 0->1 normally)
Compute shader code to read the pixel
cbuffer cbPixelLocation : register(b0)
{
uint2 pixelLocation;
uint2 padding;
}
Texture2D BackBufferTexture : register(t0);
RWStructuredBuffer<float4> RWPixel : register(u0);
[numthreads(1,1,1)]
void CS(uint3 tid : SV_DispatchThreadID)
{
RWPixel[0] = BackBufferTexture.Load(int3(pixelLocation,0));
}
Setup is rather more complex, but it has the big advantage of only downloading 16 bytes of data instead of a whole texture.
|
73,736,831
| 73,741,695
|
C++ spdlog use variables
|
I'm new to spdlog and following a tutorial which looks like this:
Log.h
#pragma once
#include "spdlog/spdlog.h"
#include "spdlog/fmt/ostr.h"
namespace Engine{
class Log{
public:
static void init();
inline static std::shared_ptr<spdlog::logger>& GetCoreLoger() { return s_CoreLogger; }
inline static std::shared_ptr<spdlog::logger>& GetClientLogger () { return s_ClientLogger;}
private:
static std::shared_ptr<spdlog::logger> s_CoreLogger;
static std::shared_ptr<spdlog::logger> s_ClientLogger;
};
}
//Client log macros
#define TRACE(...) ::Engine::Log::GetClientLogger()->trace(__VA_ARGS__)
#define INFO(...) ::Engine::Log::GetClientLogger()->info(__VA_ARGS__)
#define WARN(...) ::Engine::Log::GetClientLogger()->warn(__VA_ARGS__)
#define ERROR(...) ::Engine::Log::GetClientLogger()->error(__VA_ARGS__)
Log.cpp
#include "spdlog/sinks/stdout_color_sinks.h"
namespace Engine {
std::shared_ptr<spdlog::logger> Log::s_CoreLogger;
std::shared_ptr<spdlog::logger> Log::s_ClientLogger;
void Log::init() {
//The printing pattern, can be changed for preferance,
spdlog::set_pattern("%^[%T] %n: %v%$");
s_CoreLogger = spdlog::stdout_color_mt("VIO");
s_CoreLogger->set_level(spdlog::level::trace);
s_ClientLogger = spdlog::stdout_color_mt("APP");
s_ClientLogger->set_level(spdlog::level::trace);
}
};
This is ample for my work but I cannot seem to use variable's with it. I want to use something like this:
int test_var = 12;
INFO("The variable is: ", test_var, ".");
To get an output of:
[23:01:24] APP: The variable is: 12.
Right now the first [23:01:24] APP: The variable is: part is working but for some reason I can't seem to have it display the variable.
How can I achieve this?
|
According to spdlog's wiki pages, your formatting syntax is incorrect.
For formatting a variable, a placeholder {} is required.
Try this:
int test_var = 12;
INFO("The variable is: {}{}", test_var, ".");
// ^^^^ adding these placeholders
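Equivalently, since these are fmt-style format strings, the trailing period can simply be part of the format string with a single placeholder:
INFO("The variable is: {}.", test_var);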
|
73,736,886
| 73,737,080
|
How to setup/fill a vector of structures c++
|
I have a struct with some elements in it. I am trying to create a vector of structs and fill it up, but honestly I'm pulling my hair out because I have no idea what I am doing. Could someone please help me with how I should set this up?
'''
#include <iostream>
#include <string>
#include <vector>
using namespace std;
//define structs
struct animalS
{
string animalType = "none";
int animalCount = 0;
bool animalEndangered = false;
};
int main()
{
vector<animalS> animal;
animal.push_back("" , 0 , true);
}
'''
|
You have two mistakes. First, your animalS struct doesn't have a constructor; you should add one like this:
animalS(const std::string &animalType, int animalCount, bool animalEndangered)
: animalType(animalType)
, animalCount(animalCount)
, animalEndangered(animalEndangered)
{}
And use push_back() like this:
animal.push_back(animalS("", 0, true));
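As a side note (an alternative rather than a requirement): since C++14, animalS is still an aggregate even with its default member initializers, so instead of writing a constructor you could brace-initialize directly:
animal.push_back({"", 0, true});      // aggregate initialization
animal.push_back({"cat", 3, false});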
|
73,736,899
| 73,737,056
|
Internally sorting a class that inherits from a vector of pointers to a user defined object (C++)
|
So I defined a class on C++ that inherits from a vector of pointers:
class SuperBinList : public std::vector<SuperBin*>{
public:
SuperBinList();
SuperBinList(const std::vector<SuperBin*>& superBinList);
virtual ~SuperBinList();
SuperBinList& operator += (SuperBin* superBin);
SuperBinList& operator += (const SuperBinList& superBin);
void sortByZbi(const double sys);
void sortBySoverB() const;
};
The SuperBin class itself is defined as:
class SuperBin{
public:
SuperBin(const VI index, const double nSig, const double nBkg, const VS mPerpLabel, const VS rIsrLabel, const VS visLabel);
virtual ~SuperBin();
VI getIndex();
double getNsig();
double getNbkg();
double getSoverB();
double getBinZbi(const double sys);
VS getMperpLabel();
VS getRisrLabel();
VS getVisLabel();
SuperBin* tryMerge(SuperBin* superBin, double sys);
private:
VI index_;
double nSig_;
double nBkg_;
double sOverB_;
VS mPerpLabel_;
VS rIsrLabel_;
VS visLabel_;
};
Now the problem I'm having is that I want the SuperBinList class to be able to sort itself in descending order in terms of any of the (double type) members of the SuperBin class (such as sOverB). For this I tried the following method using a lambda function:
void SuperBinList::sortBySoverB() const{
std::sort(this->begin(), this->end(), [](const SuperBin* lhs, const SuperBin* rhs){
return lhs->getSoverB() < rhs->getSoverB();});
}
The top error I'm getting is:
error: passing 'const SuperBin' as 'this' argument discards qualifiers [-fpermissive]
return lhs->getSoverB() < rhs->getSoverB();});
Which, as I understood from similar threads, has to do with the const specifiers. However, I'm still not sure what it is that I am doing wrong. Any help would be greatly appreciated. Please pardon my ignorance as I am a physics PhD and not a computer scientist.
|
You have your SuperBinList::sortBySoverB() function defined as const, which means it is not allowed to modify SuperBinList, and
this will have the type const SuperBinList *, rather than SuperBinList *
Similarly, your lambda is defined with const pointers (const SuperBin* lhs), meaning you can only call const member functions on them.
Change your function definitions to void SuperBinList::sortBySoverB() {} and double getSoverB() const {}, and it should compile. (Generally member functions should be marked const if they are read-only operations like getters).
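A minimal sketch of those changes put together (the matching declarations in the headers must be updated too; '>' is used here because the question asks for descending order):
// SuperBin: the getter is const, so it can be called through const SuperBin*
double SuperBin::getSoverB() const { return sOverB_; }
// SuperBinList: not const, because sorting modifies the container
void SuperBinList::sortBySoverB() {
    std::sort(this->begin(), this->end(), [](const SuperBin* lhs, const SuperBin* rhs) {
        return lhs->getSoverB() > rhs->getSoverB();
    });
}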
|
73,737,396
| 73,737,456
|
Making the user give a boolean input with while loop
|
I have just started learning C++ and trying to learn the syntax.
#include <iostream>
#include <limits>
using namespace std;
int main(){
bool answer;
cout << "Did you enjoy testing this program? (1 for yes, 0 for no) ";
cin >> answer;
while (!(cin >> answer)) {
cout << "Invalid value!\n";
cin.clear();
cin.ignore(numeric_limits<streamsize>::max(), '\n');
cout << "Please type either 0 or 1: ";
cin >> answer;
}
cout << "Your feedback has been registered. Feedback: " << answer;
}
The aim is to keep asking the user over and over until they input either 0 or 1. The code snippet just makes things freeze when either of those values is given. How should this be fixed?
|
The cin >> answer; statement above the loop, and the cin >> answer; statement at the end of the loop body, both need to be removed.
You are prompting the user to enter a value, then you read in that value and ignore it, and then you wait for the user to enter in another value, even though you didn't prompt the user to enter more than 1 value.
If they do happen to enter a 2nd value, and it fails, your loop will then prompt the user to enter in a new value, then you read in that value and ignore it, and then you wait for the user to enter yet another value without prompting the user to do so.
You should be invoking cin >> answer only 1 time per loop iteration, eg:
#include <iostream>
#include <limits>
using namespace std;
int main(){
bool answer;
cout << "Did you enjoy testing this program? (1 for yes, 0 for no) ";
// cin >> answer; // <- remove this!
while (!(cin >> answer)) {
cin.clear();
cin.ignore(numeric_limits<streamsize>::max(), '\n');
cout << "Invalid value!\n";
cout << "Please type either 0 or 1: ";
// cin >> answer; // <- remove this!
}
cout << "Your feedback has been registered. Feedback: " << answer;
}
|
73,737,420
| 73,738,528
|
How can I use boost accumulator quantile_probability inside a class member initialization?
|
Boost Accumulator has an unfortunate quirk in which the api interface behaves differently when used inside of a class.
I am trying to use Boost Accumulator quantile_probability inside of a class but I can't figure out how to make it work.
This problem is similar to this issue:
Can boost accumulators be used as class members
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/stats.hpp>
#include <boost/accumulators/statistics/weighted_p_square_cumul_dist.hpp>
#include <boost/accumulators/statistics/weighted_p_square_quantile.hpp>
#include <boost/accumulators/statistics/parameters/quantile_probability.hpp>
namespace ba = boost::accumulators;
typedef ba::accumulator_set<int64_t, ba::stats<ba::tag::weighted_p_square_quantile>, int64_t> accumulator_t;
struct Foo {
accumulator_t myAcc(ba::quantile_probability = 0.90); // Doesn't work. ERROR: ‘ba::quantile_probability’ is not a type
};
accumulator_t acc(ba::quantile_probability = 0.90); // Works just fine!
|
The open parenthesis in accumulator_t myAcc( gets parsed as the start of a member function declaration, in which case this is declaring a function taking a parameter of type ba::quantile_probability. But that isn't a type, so it fails.
You need to write your initializer with = or {, or write it in a constructor's initializer list
struct Foo {
// One of these
accumulator_t myAcc = accumulator_t(ba::quantile_probability = 0.90);
accumulator_t myAcc{ba::quantile_probability = 0.90};
accumulator_t myAcc;
Foo() : myAcc(ba::quantile_probability = 0.90) {}
};
|
73,737,765
| 73,737,898
|
c++ - passing standard container as a template template parameter
|
So, I need to make a mixin class that would encapsulate children of some derived class. The derived class should inherit from the mixin while providing a container template as a template template argument for the mixin.
The desired code is somewhat like that:
/*template definition*/
template<template<typename T, typename A> class C>
class HasChildren {
protected:
C<T, A> m_children;
/* whatever */
};
It has an array of issues:
When trying to instantiate this class like HasChildren<std::vector<int>>(just for testing), compiler says expected a class template, got ‘std::vector<int>’. Clearly, I'm not passing a template template. I would like to know what exactly I'm trying to pass.
For C<T, A> m_children; compiler says that T and A are out of scope. When I add typename keyword for them, the error changes to error: wrong number of template arguments (1, should be 2). I would like to know why T and A are out of scope and why adding typename leads to this error.
|
You pass a type, std::vector<int>, instead of a template, std::vector.
You need to accept the template template parameters too. template<typename T, typename A> does not make the template template use T and A. They are just for documentation and can be removed.
Example:
template <template <class, class> class C, class T, class A>
// ^^^^^^^ ^^^^^^^
class HasChildren {
protected:
C<T, A> m_children;
};
int main() {
HasChildren<std::vector, int, std::allocator<int>> hc;
// ^template^ ^-----parameters-------^
}
If you prefer to instantiate HasChildren like you originally tried, by using HasChildren<std::vector<int>>, you can do that by specializing HasChildren:
template<class> class HasChildren; // primary
// specialization:
template<template <class, class> class C, class T, class A>
class HasChildren<C<T,A>> {
protected:
C<T,A> m_children;
};
int main() {
HasChildren<std::vector<int>> hc;
}
|
73,738,257
| 73,738,862
|
Calling open() on a Unix domain socket failed with error "No such device or address"
|
I'm trying to communicate between NodeJS and C program using named pipes in linux. My server program has written in NodeJS:
'use strict';
const net = require('net');
const pipename = '/tmp/pipe1';
const fs = require('fs');
let server = net.createServer(function(socket){
console.log('A new connection');
socket.on('data',function(data){
console.log(data.toString());
});
socket.on('end',function(){
console.log('Closed connection');
});
});
server.on('error',console.log);
fs.unlink(pipename,function(){
server.listen(pipename);
})
//Test unix-socket server:
setInterval(function(){
var stream = net.connect(pipename);
stream.on('error',console.log);
stream.write('hello');
stream.end();
},2000);
However, when I want to open the pipe inside C even after the NodeJS server has already started, I get error:
const char* pipename = "/tmp/pipe1";
int hPipe = open(pipename, O_WRONLY); //Error: No such device or address
When I try to do echo 'Hello World!' > /tmp/pipe1, I get bash: /tmp/pipe1: No such device or address. But ls -l /tmp yields:
srwxr-xr-x 1 root root 0 Sep 16 03:20 pipe1
How can I solve this issue?
|
The /tmp/pipe1 is not a pipe file. It's a socket file. That's what the leading s means in srwxr-xr-x.
And Bash's redirection like > does not support socket files. You need to use socket API to open the file.
With strace (e.g. strace bash -c 'echo > /tmp/sockfile') we can see:
...
openat(AT_FDCWD, "/tmp/sockfile", ...) = -1 ENXIO (No such device or address)
...
So the error code is ENXIO whose corresponding error message is No such device or address. Bash is just calling standard C API (like strerror) to print the error.
Example code for the client side:
int
sock_connect(char * sockpath)
{
int sock_fd;
struct sockaddr_un srv_addr = { 0 };
sock_fd = socket(AF_LOCAL, SOCK_STREAM, 0);
if (sock_fd < 0) {
return -1;
}
srv_addr.sun_family = AF_LOCAL;
snprintf(srv_addr.sun_path, sizeof(srv_addr.sun_path), "%s", sockpath);
if (connect(sock_fd, (struct sockaddr *) & srv_addr, sizeof(srv_addr)) < 0) {
return -1;
}
return sock_fd;
}
|
73,738,617
| 73,906,883
|
GMP detect float exponent overflow when initializing
|
I am currently programming on 64-bit Fedora 36, and I realized that GMP floating point numbers have limitations on the exponent size: https://gmplib.org/manual/Floating_002dpoint-Functions
The exponent of each float has fixed precision, one machine word on most systems. In the current implementation the exponent is a count of limbs, so for example on a 32-bit system this means a range of roughly 2^-68719476768 to 2^68719476736, or on a 64-bit system this will be much greater
For example, the following C program prints 0.1e-3215911262793760767 on my machine.
#include <assert.h>
#include <stdio.h>
#include <gmp.h>
int main(void) {
mpf_t f;
const char *s = "1e3000000000000000000000000000000";
assert(mpf_init_set_str(f, s, 10) == 0);
assert(mpf_out_str(NULL, 10, 100, f));
printf("\n");
}
This problem also happens when using the C++ interface. The following C++ program outputs 1e+-1294967296:
#include <iostream>
#include <gmpxx.h>
int main(void) {
mpf_class f("1e3000000000");
std::cout << f << std::endl;
}
Is there a way to detect the exponent overflow? For example, I am expecting mpf_init_set_str() to return a non-zero value to indicate the error. Or a C++ exception can be raised while initializing mpf_class f. However, currently the floats initialize successfully to the wrong value. Otherwise, is this a bug in GMP?
|
This is not a bug. This is documented in the GMP manual:
The 'mpf' functions and variables have no special notion of infinity or not-a-number, and applications must take care not to overflow the exponent or results will be unpredictable.
Basically, overflow on mpf numbers is undefined behavior. If you want well-defined behavior, you should use GNU MPFR instead.
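For illustration, a small sketch of checking the same input with MPFR (an assumption-heavy example: it relies on MPFR's documented behavior of overflowing to +inf and raising the overflow flag; link with -lmpfr -lgmp):
#include <stdio.h>
#include <mpfr.h>
int main(void) {
    mpfr_t f;
    mpfr_init2(f, 128);            // 128-bit significand
    mpfr_clear_flags();
    const char *s = "1e3000000000000000000000000000000";
    int bad = mpfr_set_str(f, s, 10, MPFR_RNDN); // nonzero if the string itself is invalid
    if (bad || mpfr_overflow_p() || mpfr_inf_p(f))
        fprintf(stderr, "exponent overflow (or parse error) detected\n");
    else
        mpfr_printf("%.10Rg\n", f);
    mpfr_clear(f);
}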
|
73,738,622
| 73,778,779
|
Error compiling Google Protocol Buffer Output (C++)
|
Update #2: Issue closed, but curious about all the error messages.
I got it to compile after including #define PROTOBUF_USE_DLLS. After the build, the Error List still shows 398 errors and the output window lists a lot of warnings, but it still compiled. Why is that?
I downloaded the Google Protocol Buffer source and was able to compile it without issues on Visual Studio 2015. However, the Google Protocol Buffer compiler generates C++ output that has a lot of compile errors. Is the compiler output below compatible with Visual Studio 2015 (C++14, I think)? Looks like a later standard of C++. If it's not, does anyone familiar with Google Protocol Buffer for C++ know how to make it output VS2015-friendly output? I downloaded from here: https://github.com/protocolbuffers/protobuf/releases
Update: Here's the compiler output:
1> Creating library C:\DEV\Visual Studio 2015\Projects\DEV\VSSolution\x64\Debug\Monitor.lib and object C:\DEV\Visual Studio 2015\Projects\DEV\VSSolution\x64\Debug\Monitor.exp
1>msgcore.pb.obj : error LNK2001: unresolved external symbol "class google::protobuf::internal::ExplicitlyConstructed<class std::basic_string<char,struct std::char_traits,class std::allocator >,8> google::protobuf::internal::fixed_address_empty_string" (?fixed_address_empty_string@internal@protobuf@google@@3V?$ExplicitlyConstructed@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@$07@123@A)
1>C:\DEV\Visual Studio 2015\Projects\DEV\VSSolution\x64\Debug\Monitor.dll : fatal error LNK1120: 1 unresolved externals
Strange, I see 510 compiler errors under the Error List tab of Visual Studio, but under the compiler output, I only see the one unresolved externals error above. That's a linking error implying that it already compiled? How come I don't see the compiler errors high-lighted in the screenshot below and on the Error List in the Output window?
|
The errors turned out to be Visual Studio Intellisense errors and not "actual" compile errors. For the one linking error, the issue was resolved by adding #define PROTOBUF_USE_DLLS to the protoc-generated C++ output.
|
73,738,740
| 73,738,885
|
destructor is called twice for variant-based implementation
|
I have a variadic variant_callable class that I want to use for runtime polymorphism. Internally it uses a visitor pattern with std::variant.
However, I came across rather strange behavior: the object's destructor is called twice!
#include <utility>
#include <variant>
#include <tuple>
namespace detail
{
template<typename... Impl>
class variadic_callable
{
public:
template<typename T>
constexpr explicit variadic_callable(T &&t) //
: varImpl_(std::forward<T>(t))
{}
variadic_callable(const variadic_callable &) = delete;
variadic_callable(variadic_callable &&) = delete;
template<typename... Args>
constexpr decltype(auto) operator()(Args &&...args) const
{
return std::visit(
[argsTuple = std::forward_as_tuple(args...)](const auto &visitor) {
return std::apply(
[&visitor](auto &&...args) {
return visitor(std::forward<decltype(args)>(args)...);
},
argsTuple);
},
varImpl_);
}
private:
std::variant<Impl...> varImpl_;
};
} // namespace detail
#include <string>
#include <iostream>
int main(int, char **)
{
struct callable
{
std::string str = "Long enough string to be allocated. Oceanic";
callable()
{
std::cout << "callable()" << std::endl;
}
void operator()(int i) const
{
std::cout << str << " " << i << '\n';
}
~callable()
{
std::cout << "~callable()" << std::endl;
}
};
{
std::cout << "expcected:\n";
const auto &c = callable();
c(815);
std::cout << "finished\n";
}
std::cout << '\n';
{
std::cout << "actual\n";
const auto &w = detail::variadic_callable<callable>{callable()};
w(815);
std::cout << "finished\n";
}
}
The output:
Program returned: 0
expcected:
callable()
Long enough string to be allocated. Oceanic 815
finished
~callable()
actual
callable()
~callable()
Long enough string to be allocated. Oceanic 815
finished
~callable()
https://godbolt.org/z/d849EaqbE
I guess there is UB somewhere, but I can't spot it.
What I find the most peculiar is the fact that in the "actual" case std::string resources are not destroyed after the first destructor invocation!
|
variadic_callable's constructor is being passed an object of type callable. This is a temporary object that cannot be the same object as the one stored in the std::variant (no matter how it is passed).
The callable inside the std::variant must therefore be move-constructed from the passed temporary object. Both of these objects need to be eventually destroyed, requiring two calls to callable's destructor.
To prevent this you need to pass the arguments from which callable is supposed to be constructed to variadic_callable's constructor instead (here an empty list) and then pass these on to std::variants in-place constructor, i.e.
template<typename T, typename... Args>
constexpr explicit variadic_callable(std::in_place_type_t<T> t, Args&&... args) //
: varImpl_(t, std::forward<Args>(args)...)
{}
called as
detail::variadic_callable<callable>{std::in_place_type<callable>};
Here I copied std::variant's constructor design for the in-place overload.
|
73,738,976
| 73,739,041
|
How to keep good code practices in a specific case (c++)
|
So, basically, I have a struct, let's say struct someStruct {int x; int y;}; (only as an example), and a class. For my specific situation, x must be able to be changed from any scope, but y should only be changed from inside the class.
I have no idea of what I'm supposed to do here. I have thought about making y a private property of the class, but it wouldn't make much sense since y is a property of my struct, and separating it from the struct would make it way too unclear.
If you need more details (I don't see why you would since there's really not much to describe, but I'm putting them here anyway), my struct is:
typedef struct {
// A, B, C, position, transform and material should be changed from any scope (don't bother with what they mean, it's not important for the purposes of this question)
vec2 A;
vec2 B;
vec2 C;
vec2 position;
mat2 transform;
Material material;
// this should only be changed from inside the class.
// it identifies the buffer that sends the information of this triangle to the GPU,
// and makes no sense from any scope outside the class
int buffer;
} Triangle;
and my class is (basically):
class Renderer {
// note: many triangles will be stored per buffer, so I can't just assign one buffer to one triangle and vice-versa
GLuint *buffers;
Triangle* triangles;
// + some stuff to handle rendering and blah blah blah
}
If you really want it, I can post the full code, but I don't think it would be useful in any way, so for now I'm not posting it.
Preferably, try answering the general situation so I actually learn something from this question, but a more specific answer is very welcome too (e.g. store buffer and triangle data in another way).
|
Make B private and declare Renderer as friend, e.g.:
struct Triangle {
friend class Renderer;
// A, B, C, position, transform and material should be changed from any scope (don't bother with what they mean, it's not important for the purposes of this question)
vec2 A;
private:
vec2 B;
public:
vec2 C;
vec2 position;
mat2 transform;
Material material;
// this should only be changed from inside the class.
// it identifies the buffer that sends the information of this triangle to the GPU,
// and makes no sense from any scope outside the class
int buffer;
};
class Renderer {
public:
// note: many triangles will be stored per buffer, so I can't just assign one buffer to one triangle and vice-versa
GLuint *buffers;
Triangle* triangles;
void setB(vec2 b) { triangles->B = b; } // no error
// + some stuff to handle rendering and blah blah blah
};
int main() {
Renderer renderer;
renderer.triangles = new Triangle;
renderer.triangles->A = vec2(1,2); // no error
renderer.setB(vec2(1,2)); // no error
renderer.triangles->B = vec2(1,2); // error: 'vec2 Triangle::B' is private within this context
}
|
73,739,507
| 73,739,755
|
How do you handle indivisible vector lengths with SIMD intrinsics, array not a multiple of vector width?
|
I am currently learning how to work with SIMD intrinsics. I know that an AVX 256-bit vector can contain four doubles, eight floats, or eight 32-bit integers. How do we use AVX to process arrays that aren't a multiple of these numbers.
For example, how would you add two std::vectors of 53 integers each? Would we slice as many of the vector that would fit in the SIMD vector and just manually process the remainder? Is there a better way to do this?
|
Would we slice as many of the vector that would fit in the SIMD vector and just manually process the remainder? Is there a better way to do this?
Pretty much this. A basic example that processes all numbers in batches of 8, and uses mask load/mask store to handle the remainder:
#include <immintrin.h>

void add(int* const r, const int* const a, const int* const b, const unsigned count) {
// how many blocks of 8, and how many left over
const unsigned c8 = count & ~0x7U;
const unsigned cr = count & 0x7U;
// process blocks of 8
for(unsigned i = 0; i < c8; i += 8) {
__m256i _a = _mm256_loadu_si256((__m256i*)(a + i));
__m256i _b = _mm256_loadu_si256((__m256i*)(b + i));
__m256i _c = _mm256_add_epi32(_a, _b);
    _mm256_storeu_si256((__m256i*)(r + i), _c);
}
const __m128i temp[5] = {
_mm_setr_epi32(0, 0, 0, 0),
_mm_setr_epi32(-1, 0, 0, 0),
_mm_setr_epi32(-1, -1, 0, 0),
_mm_setr_epi32(-1, -1, -1, 0),
_mm_setr_epi32(-1, -1, -1, -1)
};
// I'm using mask load / mask store for the remainder here.
// (this is not the only approach)
__m256i mask;
if(cr >= 4) {
mask = _mm256_set_m128i(temp[cr&3], temp[4]);
} else {
mask = _mm256_set_m128i(temp[0], temp[cr]);
}
__m256i _a = _mm256_maskload_epi32((a + c8), mask);
__m256i _b = _mm256_maskload_epi32((b + c8), mask);
__m256i _c = _mm256_add_epi32(_a, _b);
  _mm256_maskstore_epi32((r + c8), mask, _c);
}
Of course, if you happen to use your own containers (or provide your own allocators), then you can avoid most of this mess by simply ensuring all container allocations occur in multiples of 256bits.
// yes, this class is missing a lot...
class MyIntArray {
public:
MyIntArray(unsigned count, const int* data) {
// bump capacity to next multiple of 8
unsigned cap = count & 7;
if(cap) cap = 8 - cap;
capacity = cap + count;
// allocation is aligned to 256bit
alloc = new int[capacity];
size = count;
memcpy(alloc, data, sizeof(int) * size);
}
MyIntArray(unsigned count) {
// bump capacity to next multiple of 8
unsigned cap = count & 7;
if(cap) cap = 8 - cap;
capacity = cap + count;
// allocation is aligned to 256bit
alloc = new int[capacity];
size = count;
}
unsigned capacity;
unsigned size;
int* alloc;
int* begin() { return alloc; }
int* end() { return alloc + size; }
const int* begin() const { return alloc; }
const int* end() const { return alloc + size; }
};
void add(MyIntArray r, const MyIntArray a, const MyIntArray b) {
// process blocks of 8.
// we may be stamping beyond the end of the array, but not over the
// the end of the capacity allocation....
// (probably also want to check to see if the sizes match!).
for(unsigned i = 0; i < r.size; i += 8) {
__m256i _a = _mm256_loadu_si256((__m256i*)(a.alloc + i));
__m256i _b = _mm256_loadu_si256((__m256i*)(b.alloc + i));
__m256i _c = _mm256_add_epi32(_a, _b);
    _mm256_storeu_si256((__m256i*)(r.alloc + i), _c);
}
}
|
73,740,325
| 73,740,707
|
How can I measure the speed difference of for loop?
|
I am curious about the items below in for loop.
for(auto) vs for(auto &)
Separating the for loop
for(auto &) vs for(const auto &)
for(int : list) vs for(auto : list) [list is integer vector]
So, I wrote the below code for testing in the C++17 version.
There seems to be a difference in CMake debug mode (without optimization):
// In debug mode
1. elapsed: 7639 (1663305922550 - 1663305914911)
2. elapsed: 3841 (1663305926391 - 1663305922550)
3. elapsed: 3810 (1663305930201 - 1663305926391)
But in release mode(with gcc -O3) there is no difference between 1 ~ 3
// release mode
1. elapsed: 0 (1663305408984 - 1663305408984)
2. elapsed: 0 (1663305409984 - 1663305409984)
3. elapsed: 0 (1663305410984 - 1663305410984)
I don't know if my test method is wrong, or whether it is correct that there is no difference depending on the optimization status.
Here is my testing source code.
// create test vector
const uint64_t max_ = 499999999; // 499,999,999
std::vector<int> v;
for (int i = 1; i < max_; i++)
v.push_back(i);
// test 1.
auto start1 = getTick();
for (auto& e : v)
{
auto t = e + 100; t += 300;
}
for (auto& e : v)
{
auto t = e + 200; t += 300;
}
auto end1 = getTick();
// test 2.
// Omit tick function
for (auto& e : v)
{
auto t1 = e + 100; t1 += 300;
auto t2 = e + 200; t2 += 300;
}
// test 3.
for (auto e : v)
{
auto t1 = e + 100; t1 += 300;
auto t2 = e + 200; t2 += 300;
}
...
And then, getTick() was obtained through chrono milliseconds.
uint64_t getTick()
{
return (duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count());
}
Also, this testing progressed on Debian aarch64
Jetson Xavier NX (jetpack 4.6, ubuntu 18.04LTS)
8Gb RAM
GCC 7.5.0
Please advise if there is anything wrong. Thank you!
|
An empty loop can optimize away, so your compiler correctly does that. But benchmarking with optimization disabled is not meaningful. C++ requires optimization to get the performance we expect for production use (especially with template library functions), and optimization or not isn't a constant factor speedup; it makes different ways to express the same logic lead to different asm, when in a normally optimized build they'd compile to the same asm.
You can't infer anything from a debug build about what's faster in a release build, not about small-scale micro-optimization things like this. See also Idiomatic way of performance evaluation?
With optimization enabled, copying to a local object can remove most of the work if you only use one member of that copy. Get used to thinking of what real work actually needs to happen for the code; often a compiler will figure out what that minimum is. For example, auto & isn't actually going to put a pointer in a register and dereference it beyond what it was doing to loop over the array in the first place, the reference variable doesn't actually exist anywhere in the asm as a separate value in a register or memory.
So this isn't something you can isolate in a benchmark without some real work in the loop, e.g. summing an array, or modifying every element. You could try using something like Benchmark::DoNotOptimize or similar inline asm to make the compiler materialize a value in a register without doing anything else, but to be sure you're benchmarking exactly the right thing, you need to understand asm and check the compiler output. (Microbenchmarking is hard!) In which case you probably can already answer the question just by looking at the asm and seeing that it's the same either way in normal cases.
It's probably easier to just check which things all compile to the same asm with optimization enabled, instead of trying to guess whether small differences in experimental timing are due to noise or might be a real difference. (And if there is a difference, whether it's just a coincidence that it was faster with this luck of the draw for code alignment and surrounding code, on this particular CPU.)
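For reference, a minimal sketch of that kind of optimization sink (GCC/Clang inline asm; a toy stand-in, not the actual Google Benchmark implementation):
// Empty asm statement that claims to read and modify `value`, forcing the compiler
// to materialize it in a register without emitting any real instructions.
inline void do_not_optimize(int& value) {
    asm volatile("" : "+r"(value) : : "memory");
}
// Usage inside the timed loop so the additions can't be removed:
// for (auto& e : v) { auto t = e + 100; t += 300; do_not_optimize(t); }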
|
73,740,361
| 73,740,488
|
Getting Unexpected output in C++
|
I am trying to create a function which creates an array and return a pointer of the array:
Here's my code:
#include<iostream>
using namespace std;
int* example() {
int arr[] = {1,2,3};
int *a = arr;
return a;
}
int main() {
int *a = example();
cout << *a << endl;
cout << *(a+1) << endl;
cout << *(a+2) << endl;
return 0;
}
And here's the output:
1
1878006336
3
I don't know why I am getting that garbage value on the 2nd line instead of 2.
Here are some of my observations regarding this:
I am getting desired output if I manually create the array and pointer in the main() function.
If I print the second line before the first line, I get desired output as:
2
1
3
What is the reason behind this weird behaviour?
|
The array arr is declared local to the function. As a result its lifetime ends when the function exits. We call this automatic lifetime. Returning a pointer to this memory invokes undefined behavior. The code might work the way you expect, or it might not.
To work around this, you need a lifetime that is not automatic, and which can be controlled. There are a few paths to this.
Manual memory management
You might use dynamic memory management with new and delete (or in this case delete []). This is generally discouraged as it creates many opportunities for mistakes.
int *example() {
int *a = new int[3] { 1, 2, 3 };
return a;
}
You would need to remember to delete this array.
int main() {
int *a = example();
cout << a[0] << endl;
cout << a[1] << endl;
cout << a[2] << endl;
delete[] a;
return 0;
}
Smart pointers
You could use a smart pointer, which makes explicitly deallocating unnecessary, as the memory it points to is deallocated when the smart pointer goes out of scope.
unique_ptr<int[]> example() {
auto a = unique_ptr<int[]>(new int[3] { 1, 2, 3 });
return a;
}
int main() {
auto a = example();
cout << a[0] << endl;
cout << a[1] << endl;
cout << a[2] << endl;
return 0;
}
STL containers
The most idiomatic approach is to use an STL container class.
The std::array container maps directly to the fixed size arrays seen so far. The std::vector container is more appropriate if the size is unknown at compile time, which it often will be, making this a very common container to see in idiomatic C++ code.
For std::vector, the elements live on the heap and are deallocated when the container goes out of scope; std::array stores its elements inside the object itself, but since the container is returned by value, the caller receives its own copy and nothing dangles.
array<int, 3> example() {
array<int, 3> a = { 1, 2, 3 };
return a;
}
int main() {
auto a = example();
cout << a[0] << endl;
cout << a[1] << endl;
cout << a[2] << endl;
return 0;
}
|
73,740,441
| 73,740,504
|
How to get min and max value from a linked list using recursive function?
|
I have started learning data structures. Currently I am learning linked lists. I have created a linked list, and I want to get the minimum value and the maximum value from the list using a recursive function. I can do that using a loop, but I want to do it recursively. I have written functions for getting the minimum and maximum values, but they are returning the first element of the list.
Minimum value function:
int getMin(node *currentNode) {
int minValue = INT_MAX;
if (currentNode != NULL) {
minValue = minValue < currentNode->data ? minValue : currentNode->data;
getMin(currentNode->next);
}
return minValue;
}
Maximum value function:
int getMax(node *currentNode) {
int maxValue = INT_MIN;
if (currentNode) {
maxValue = currentNode->data > maxValue ? currentNode->data : maxValue;
getMax(currentNode->next);
}
return maxValue;
}
My full program:
#include <bits/stdc++.h>
using namespace std;
struct node {
int data;
node *next;
} *root;
void append(vector<int> vec) {
node *currentNode, *tail;
root = new node();
root->data = vec[0];
root->next = NULL;
tail = root;
for (vector<int>::iterator i = vec.begin() + 1; i < vec.end(); i++) {
currentNode = new node();
currentNode->data = *i;
currentNode->next = NULL;
tail->next = currentNode;
tail = tail->next;
}
}
void display(node *currentNode) {
if (currentNode != NULL) {
cout << currentNode->data << " ";
display(currentNode->next);
}
}
int getMin(node *currentNode) {
int minValue = INT_MAX;
if (currentNode != NULL) {
minValue = minValue < currentNode->data ? minValue : currentNode->data;
getMin(currentNode->next);
}
return minValue;
}
int getMax(node *currentNode) {
int maxValue = INT_MIN;
if (currentNode) {
maxValue = currentNode->data > maxValue ? currentNode->data : maxValue;
getMax(currentNode->next);
}
return maxValue;
}
int main() {
vector<int> vec {5, 7, 3, 4, 6};
append(vec);
display(root);
cout << "\nMin: " << getMin(root) << "\n";
cout << "Max: " << getMax(root) << "\n";
return 0;
}
I am getting the first item of the list. Why are they not returning the minimum and maximum values? How can I fix this problem?
|
The obvious problem is that you call your function recursively, but ignore the return value. This means that only the first item in the list is considered.
int getMin(node *currentNode) {
int minValue = INT_MAX;
if (currentNode != NULL) {
minValue = minValue < currentNode->data ? minValue : currentNode->data;
getMin(currentNode->next); // the return value here is being ignored.
}
return minValue;
}
Here's the correct solution
int getMin(node *currentNode) {
if (currentNode != NULL) {
int curr = currentNode->data;
int rest = getMin(currentNode->next); // don't ignore the return value
return curr < rest ? curr : rest; // which is less? the current item or
// the minimum item in the rest of the list
}
else {
return INT_MAX;
}
}
|
73,740,530
| 73,740,594
|
Stroustrup reason for using auto when defining a variable
|
I am reading Bjarne Stroustrup "The C++ programming language" book, and it is mentioned that one of the reasons for using auto in a variable definition is:
The definition is in a large scope where we want to make the type clearly visible to readers of our code.
What is the meaning of "large scope" here? And does anyone have an example for this statement? I feel it is not clear how using auto makes the type clearly visible to readers of the code.
|
You took only part of the quote from the book. The entire quote is:
We use auto where we don’t have a specific reason to mention the type
explicitly. ‘‘Specific reasons’’ include:
The definition is in a large scope where we want to make the type clearly visible to readers of our code.
We want to be explicit about a variable’s range or precision (e.g., double rather than float).
The part you mentioned in the question is the Specific reason when auto should not be used, so exactly the opposite you thought.
I.e., when you want to make the type clearly visible to readers, you should not use auto.
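A small illustration of the point (loadExchangeRates is a made-up function): in a long function, spelling out the type documents it for readers who are far from the initialization, while auto is fine where the use sits right next to the definition.
// Large scope, value used much later: spell the type out so readers can see it.
std::map<std::string, double> rates = loadExchangeRates();
// Small scope, type obvious or uninteresting: auto is fine.
for (auto it = rates.begin(); it != rates.end(); ++it) { /* ... */ }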
|
73,740,861
| 73,741,200
|
Dynamic allocation and pagination
|
I'm trying to monitor (with the system monitor) the total memory dynamically allocated by a snippet (for whatever reason: I know, it sounds academic). Here's what I use (I know I'm not deallocating, and that the code is ugly).
#include <iostream>
#include <thread>
#include <cstdint>
using namespace std;
int main()
{
long long unsigned j = 0;
while(true)
{
int * pt = new int32_t[250 * 1000 * 10]; //10MB
static long long unsigned int m2GB = 200; //loop rounds needed to allocate 2GB
j++;
if(j % 10 == 0) //100MB per 100MB
{
cout << (j*10) << "MB allocated" << endl;
this_thread::sleep_for(100ms);
}
if(j >= m2GB)
break;
}
cout << "Type sth to close" << endl;
cin >> j; //blocking
}
Thing is... I don't even see a spike with this code, while, if I'm not mistaken, it should allocate up to 2GB of memory.
... what am I doing wrong?
|
Your code is not using the allocated memory. The compiler is going to notice that and will simply optimize the allocation away.
If you want to observe the memory being allocated use it in such a way that it is not trivial to perform the same action without the allocation. What exactly that means will depend on how good the compiler's optimizer is. It may be enough to zero-initialize the memory, or actual connection to input/output may be needed.
|
73,743,062
| 73,744,017
|
how to provide a default value for a template conditional type?
|
All
I am writing a trimStart fucntion with c++ template like the following:
template<typename T>
static T trimStart(T source, std::conditional<isWide<T>(), const wchar_t*, const char*>::type trimChars = " \t\n\r\v\f"))
{
....
}
Now I would like to provide a default value " \t\n\r\v\f" or L" \t\n\r\v\f" according to the type of trimChars. Could you please help me look at how to implement that?
With the great help from 463035818_is_not_a_number & Philipp, I looked at the latest template docs again; here is the updated skeleton of the code (C++20):
//limit to T as string only
template<typename T>
concept isStr = (
std::is_same_v<T, std::string> ||
std::is_same_v<T, std::wstring>
);
// different default value according type of T
template<typename T>
constexpr auto defaultValue() {
if constexpr (std::is_same_v<T, std::wstring>)
return L" \t\n\r\v\f";
else
return " \t\n\r\v\f";
}
//claim a function parameter with a default value
template<isStr T>
static T trimStart(T source, decltype(defaultValue<T>()) trimChars = defaultValue<T>())
{
//...internal variable example
typename std::conditional<some_conditional<T>(), std::wistringstream,std::istringstream>::type
ss(...);
}
|
I rearranged your code to this:
#include <type_traits>
#include <iostream>
template <typename T> struct is_foo : std::false_type {};
struct foo{ int value;};
template <> struct is_foo<foo> : std::true_type {};
struct bar{};
template <typename T>
void func(T t, std::conditional_t<is_foo<T>::value,int,double> x = ????) {
std::cout << x << "\n";
}
int main() {
func(foo{});
func(bar{});
}
func takes a parameter of type T. Depending on a condition on this type the second argument type is decided among two types. In my code it is between int when is_foo<T> is true and double when is_foo<T> is false.
You can replace ?? with a call to a function template that returns the desired default argument:
template <typename T,typename = void>
struct get_default{
double operator()(){ return 4.2;}
};
template <typename T>
struct get_default<T,std::enable_if_t<is_foo<T>::value>> {
double operator()(){ return 42; }
};
Demo
In your specific case I would consider simply using std::basic_string<T>.
|
73,743,356
| 73,749,166
|
wxStyledTextCtrl - Size of AutoComp
|
I was just wondering if it is possible to find the size (in pixels) of the autocompletion control shown by the wxStyledTextCtrl.
My goal is to show a help window associated with the entry when a selection happens. Therefore, I need the location and also the width of the autocompletion control. It seems location can be found from m_STC->AutoCompPosStart() but there seems to be no way of finding the width. I am using the following code:
auto StartPos = m_STC->ToPhys(m_STC->PointFromPosition(m_STC->AutoCompPosStart()));
int MaxChars = m_STC->AutoCompGetMaxWidth(); //returns 0 unless set to a fixed value
int w, h;
m_STC->GetTextExtent(wxString("A", MaxChars), &w, &h);
return wxPoint(StartPos.x + w, StartPos.y);
I am using Windows and wxWidgets 3.2.
|
There is no way to get this information from the styled text control because the autocomp window is completely managed by Scintilla. And unfortunately, Scintilla doesn't make any methods available for getting this info.
As a hack-around, the popup is currently implemented as a child window of the styled text control. So you could do something like this:
const wxWindowList& children = m_stc->GetChildren();
for ( auto it = children.begin() ; it != children.end() ; ++it )
{
    // We're assuming the styled text control has at most 1 child -
    // namely the autocomp popup. It might be better to check that
    // the window found is in fact the auto comp popup somehow.
    wxWindow* win = *it;
    // win->GetPosition() will return screen coordinates, so to get client
    // coordinates, ScreenToClient must be called.
    wxPoint psn = m_stc->ScreenToClient(win->GetPosition());
    wxSize sz = win->GetSize();
// Do something with size and position here.
}
However, this isn't guaranteed to always work. If in the future, the auto comp popup implementation is changed to use a top level window instead of a child of the control, this method will fail.
|
73,743,941
| 73,744,655
|
Non type template parameter of type std::string& compiles in gcc but not in clang
|
I am learning C++ using the books listed here. In particular, I learnt that we cannot use std::string as a non-type template parameter. Now, to further clear my concept of the subject I tried the following example which compiles in gcc and msvc but not in clang. Demo
std::string nameOk[] = {"name1", "name2"};
template<std::string &name>
void foo()
{
}
int main()
{
foo<nameOk[0]>(); //this compiles in gcc and msvc but not in clang in C++20
}
My question is which compiler is right here (if any). That is, is the program well-formed or is it IFNDR?
|
Clang is complaining that your template argument is a subobject. (If you make the argument a complete string object, it works.)
This behavior is based on an earlier restriction in the standard at [temp.arg.nontype], which read
For a non-type template-parameter of reference or pointer type, the value of the constant expression shall not refer to (or for a pointer type, shall not be the address of):
a subobject (6.7.2 [intro.object]),
This restriction is lifted as of P1907 which is in C++20, but Clang hasn't reflected that yet. GCC also fails when you use e.g. version 10 with C++17:
error: '& nameOk[0]' is not a valid template argument of type 'std::string&' {aka 'std::__cxx11::basic_string<char>&'} because 'nameOk[0]' is not a variable
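To illustrate the first point, a small sketch (my own, modeled on the question's code) of the "complete object" variant that Clang does accept:
#include <string>
std::string nameOk[] = {"name1", "name2"};
std::string wholeName = "name1";   // a complete object with static storage duration
template<std::string& name>
void foo() {}
int main()
{
    foo<wholeName>();    // accepted by Clang too: the argument is a complete object
    // foo<nameOk[0]>(); // rejected by Clang: nameOk[0] is a subobject of the array
}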
|
73,744,258
| 73,746,967
|
asio, shared data, Active Object vs mutexes
|
I want to understand: what is the true asio way to use shared data?
Reading the asio and beast examples, the only example of using shared data is http_crawl.cpp (perhaps I missed something).
In that example the shared object is only used to collect statistics for sessions; that is, the sessions do not read that object's data.
As a result I have three questions:
Is it implied that interaction with shared data in asio-style is an Active Object? i.e. should mutexes be avoided?
whether the statement will be correct that for reading the shared data it is also necessary to use "requests" to Active Object, and also no mutexes?
has anyone tried to evaluate the overhead of "requests" to Active Object, compared to using mutexes?
|
Is it implied that interaction with shared data in asio-style is an Active Object? i.e. should mutexes be avoided?
Starting at the end, yes mutexes should be avoided. This is because all service handlers (initiations and completions) will be executed on the service thread(s) which means that blocking in a handler will block all other handlers.
Whether that leads to Active Object seems to be a choice to me. Yes, a typical approach would be like Active Object (see e.g. boost::asio and Active Object), where operations queue for the data.
However, other approaches are viable and frequently seen, e.g. the data moving along with its task(s) through a task flow.
whether the statement will be correct that for reading the shared data it is also necessary to use "requests" to Active Object, and also no mutexes?
Yes, synchronization needs to happen for shared state, regardless of the design pattern chosen (although some design patterns reduce sharing altogether).
The Asio approach is using strands, which abstract away the scheduling from the control flow. This gives the service the option to optimize for various cases (e.g. continuation on the same strand, the case where there's only one service thread anyway etc.).
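As an illustration, here is a minimal sketch (my own, not from the question or answer, assuming Boost 1.70 or later) of a strand serializing both a "write request" and a "read request" against shared state, with no mutex:
#include <boost/asio.hpp>
#include <iostream>
#include <map>
#include <string>
int main() {
    boost::asio::io_context ioc;
    // Shared state: only ever touched from handlers posted to the strand below,
    // so no mutex is needed.
    std::map<std::string, int> stats;
    auto strand = boost::asio::make_strand(ioc);
    // A "write request": runs serialized on the strand.
    boost::asio::post(strand, [&stats] { ++stats["connections"]; });
    // A "read request": also goes through the strand, so it can never run
    // concurrently with the write above.
    boost::asio::post(strand, [&stats] {
        std::cout << "connections: " << stats["connections"] << "\n";
    });
    ioc.run();   // single thread here; with a thread pool the strand still serializes
}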
has anyone tried to evaluate the overhead of "requests" to Active Object, compared to using mutexes?
Lots of people, lots of times. People are often wary of trying Asio because "it uses locking internally". If you know what you're doing, throughput can be excellent, which goes for most patterns and industrial-strength frameworks.
Specific benchmarks depend heavily on specific implementation choices. I'm pretty sure you can find examples on github, blogs and perhaps even on this site.
(perhaps I missed something)
You're missing the fact that all IO objects are not thread-safe, which means that they themselves are shared data for any composed asynchronous operation (chain)
|
73,745,547
| 73,746,053
|
Cmake: How to statically link packages to shared library?
|
I want to create a .dll library with all its dependencies packed inside the .dll.
However, there seems to be no easy way to achieve that with CMake. My setup:
cmake_minimum_required(VERSION 3.0.0)
project(Main VERSION 0.1.0)
add_library(Main SHARED Main.cpp)
find_package(libzippp REQUIRED)
target_link_libraries(Main PRIVATE libzippp::libzippp)
This will produce both Main.dll but also libzippp.dll.
I would like to have libzippp.dll packed (statically linked) into Main.dll.
|
Of course you can't pack one DLL into another. You have to make libzippp a static library in the first place. To do this, build libzippp with BUILD_SHARED_LIBS set to NO at the CMake command line. Then libzippp::libzippp will be a static library when you go to find_package it (point your consuming project at the chosen install prefix, e.g. via CMAKE_PREFIX_PATH, so find_package can locate it).
This is easy enough to show steps for:
$ git clone git@github.com:ctabin/libzippp.git
$ cmake -S libzippp -B build -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=NO -DCMAKE_INSTALL_PREFIX=$PWD/local -DLIBZIPPP_BUILD_TESTS=NO
$ cmake --build build --target install
$ tree local
local/
├── include
│ └── libzippp
│ └── libzippp.h
├── lib
│ └── libzippp_static.a
└── share
└── libzippp
├── FindLIBZIP.cmake
├── libzipppConfig.cmake
├── libzipppConfigVersion.cmake
├── libzipppTargets.cmake
└── libzipppTargets-release.cmake
|
73,745,878
| 73,746,049
|
I am trying to reverse an array using a loop in C++, but I don't know what the problem is
|
#include <iostream>
using namespace std;
int* reverse(int arr[],int n){
int rev[100];
int j =0;
for(int i=n-1;i>=0;i--){
rev[j]=arr[i];
j++;
}
return rev;
}
int main() {
int n;
cin>>n;
int arr[100];
for(int i=0;i<n;i++){
cin>>arr[i];
}
cout<<reverse(arr,n);
}
I am trying to reverse an array using loops, but I don't know what the error is; it was returning some garbage value.
|
Your rev temporary resides in automatic storage. It means that the object will be gone after the function returns. While C++ allows you to decay rev to an int* and then return said pointer, it does not mean that this returns the object itself. You merely get a pointer to an already destroyed object. Not very useful. In fact, doing anything with this pointer will cause undefined behaviour.
Usually what you want to do is reverse things in-place. That's also how std::reverse works.
So, there are two options. If you have a completely filled c-style array, you could write a reverse function like this:
template <std::size_t N>
void reverse(int (&a)[N]) {
// reverse a from 0 to N-1
}
reverse(a);
Or, if you have an only partially filled array, take a page out of the standard library and reverse a range, denoted by two iterators.
void reverse(int* begin, int* end) {
/* begin points to the first entry, end points one past the last */
}
reverse(a, a+n);
Of course, instead of using c-style arrays, you could use a dynamically growing array such as std::vector, which carries the actual size of the array around for you.
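For completeness, a minimal sketch of the iterator-range version filled in (my own example, not part of the original answer):
#include <iostream>
#include <utility>
// Reverses the range [begin, end) in place.
void reverse(int* begin, int* end) {
    while (begin < end) {
        --end;                    // end points one past the last element
        std::swap(*begin, *end);
        ++begin;
    }
}
int main() {
    int a[100];
    int n = 5;
    for (int i = 0; i < n; ++i) a[i] = i + 1;              // 1 2 3 4 5
    reverse(a, a + n);
    for (int i = 0; i < n; ++i) std::cout << a[i] << ' ';  // 5 4 3 2 1
    std::cout << '\n';
}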
|
73,746,134
| 73,755,783
|
Is std::format going to work with things like ICU UnicodeString?
|
Rather than a long preamble, here is my core question, up front. The paragraphs below explain in more detail.
Is there a template parameter in std::format (or fmt) that will allow me to format into ICU UnicodeStrings?, or perhaps into something like char16_t[] or std::basic_string<char16_t>, while using a unicode library to deal with things like encoding and grapheme clusters?
More Explanation, Background
I see the C++20 standard has this std::format library component for formatting strings. (It's late in 2022 and I still can't use it with my compiler (clang from Xcode 14), and I'm curious about the cause of the delay, but that's another question.)
I've been using this fmt library, which looks like a simpler preview of the official one.
int x = 10;
fmt::print("x is {}", x);
I've also been using ICU's UnicodeString class. It lets me correctly handle all languages and character types, from ASCII to Chinese characters to emojis.
I don't expect the fmt library to be aware of Unicode out of the box. That would require that it build and link with ICU, or something like it. Here's an example of how it's not:
void testFormatUnicodeWidth() {
// Two ways to write the Spanish word "está".
char *s1 = "est\u00E1"; // U+00E1 : Latin small letter A with acute
char *s2 = "esta\u0301"; // U+0301 : Combining acute accent
fmt::print("s1 = {}, length = {}\n", s1, strlen(s1));
fmt::print("s2 = {}, length = {}\n", s2, strlen(s2));
fmt::print("|{:8}|\n", s1);
fmt::print("|{:8}|\n", s2);
}
That prints:
s1 = está, length = 5
s2 = está, length = 6
|está |
|está |
To make that width specifier work the way I want, to look nice on the screen, I could use ICU's classes, which can iterate over the visible characters ("grapheme clusters") of a string.
I don't expect std::format to require Unicode either. From what I can tell the C++ standard people create things that can run on small embedded devices. That's cool. But I'm asking if there will also be a way for me to integrate the two, so that I don't have a split world, between:
C++'s strings and format.
ICU strings if I want things to look right on screen.
|
{fmt} doesn't support ICU UnicodeString directly but you can easily write your own formatting function that does. For example:
#include <fmt/xchar.h>
#include <unistr.h>
template <typename... T>
auto format(fmt::wformat_string<T...> fmt, T&&... args) -> UnicodeString {
auto s = fmt::format(fmt, std::forward<T>(args)...);
return {s.data(), s.size()};
}
int main() {
UnicodeString s = format(L"The answer is {}.", 42);
}
Note that {fmt} supports Unicode but width estimation works on code points (like Python's str.format) instead of grapheme clusters at the moment. It will be addressed in one of the future releases.
|
73,746,195
| 73,770,353
|
Building a BSON filter from raw query string
|
Is it possible to create a collection filter from a raw query string? If so, how?
I'm using the mongocxx driver and want to use some tested queries from the mongo shell instead of building them inconveniently with that BSONCXX streambuilder. But I can not find any examples.
I tried converting with from_json(), but this throws an error
bsoncxx::from_json("{ \"val\": { $gt: 0, $lt: 9 }}");
Got parse error at "$", position 11: "SPECIAL_EXPECTED": could not parse JSON document
whereby
bsoncxx::from_json("{ \"val\": { \"$gt\": 0, \"$lt\": 9}}");
leads to an unrecoverable exception and crashes the application.
|
Actually
bsoncxx::from_json("{ \"val\": { \"$gt\": 0, \"$lt\": 9}}");
was not the issue. The transformation from std::string to bsoncxx::view was. Not 100% sure what exactly caused the crash, but this does the trick for me.
Solution:
std::string query( R"( { "val": { "$gt": 0, "$lt": 9}} )");
collection.find(bsoncxx::from_json(query.c_str()).view());
|
73,746,392
| 73,746,635
|
C++: How to make program differentiate between multiplication and pointers, not accepting operations
|
So I'm writing a program for an assignment that multiplies two matrices using dynamic arrays only. I'm running into two problems. I can't figure out how to add specific values from two different arrays and store the result in a third array:
bag = F[add] + F[add+1]; //line 73
and I also can't figure out how to multiply specific values from two different arrays and store the result in a third array:
F[y] = (C[cnt1][cnt2]) * (D[cnt2][cnt1]); //line 68
It's reading the multiplication operation as if i'm trying to create another pointer, because I keep getting these errors:
sh -c make -s
./main.cpp:68:36: error: incompatible integer to pointer conversion assigning to 'int *' from 'int'
F[y] = (C[cnt1][cnt2]) * (D[cnt2][cnt1]);
~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~
./main.cpp:70:19: error: invalid operands to binary expression ('int *' and 'int')
if (F[n]%2 != 0)
~~~~^~
./main.cpp:73:24: error: invalid operands to binary expression ('int *' and 'int *')
bag = F[add] + F[add+1];
~~~~~~ ^ ~~~~~~~~
3 errors generated.
make: *** [Makefile:9: main] Error 1
exit status 2
This is my full code:
#include <iomanip>
#include <iostream>
using namespace std;
int main()
{
cout << endl;
int **C, n, m; //pointer, rows, columns for matrix 1;
int **D, p, q; //pointer, rows, columns for matrix 2;
int **E, r, s; //pointer, rows, columns for result matrix;
int **F, v, w; //pointer, rows, columns for array that stores values while computing the multiplication of the matrices;
cout << "Enter the dimensions of your matrices: ";
cin >> n >> m;
p = n; q = m;
r = n; s = m;
v = n;
cout << endl;
C = new int *[n];
D = new int *[p];
E = new int *[r];
for (int x=0 ; x < n; x++)
{
C[x] = new int [m];
}
for (int x=0 ; x < p; x++)
{
D[x] = new int [q];
}
for (int x=0 ; x < r; x++)
{
E[x] = new int [r];
}
for (int x=0 ; x < v; x++)
{
F[x] = new int [v];
}
cout << "Enter the values of your first matrix: ";
for (int I=0 ; I < n; I++)
{
for (int K=0 ; K < m; K++)
cin >> C[I][K];
}
cout << "Enter the values of your second matrix: ";
for (int L=0 ; L < p; L++)
{
for (int Z=0 ; Z < q; Z++)
cin >> D[L][Z];
}
cout << endl;
for (int cnt1 = 0; cnt1 < n; cnt1++)
{
for (int cnt2 = 0; cnt2 < m; cnt2++)
{
int bag;
int add = 0;
while (add < v)
{
for (int y = 0; y < n; y++)
{
F[y] = (C[cnt1][cnt2]) * (D[cnt2][cnt1]);
}
if (F[n]%2 != 0)
F[n] = F[n+1];
bag = 0;
bag = F[add] + F[add+1];
add += 2;
}
E[cnt1][cnt2] = bag;
}
}
cout << "The product of:" << endl << endl;
for (int I=0 ; I < n; I++ )
{
for (int K=0 ; K < m; K++)
cout << setw(4)<< C[I][K];
cout << endl;
}
cout << endl << "and:" << endl << endl;
for (int L=0 ; L < p; L++ )
{
for (int Z=0 ; Z < q; Z++)
cout << setw(4)<< D[L][Z];
cout << endl;
}
cout << endl << "is: " << endl << endl;
for (int T=0 ; T < r; T++)
{
for (int U=0 ; U < s; U++)
cout << setw(4) << E[T][U];
}
return 0;
}
How do I fix this?
Note: I am aware that this style is outdated and there are memory leaks, and I am aware that using vectors would be better. Unfortunately I have to do it the way my professor wants me to :\
Any help/suggestions is appreciated.
|
F[y] = (C[cnt1][cnt2]) * (D[cnt2][cnt1]); //line 68
C is int **C
D is int **D
F is int **F
So, the expression on the right side is an int.
F[y], though, is an int pointer. So, you either meant:
*(F[y]) = ... or F[y][something] = ...
There's no magic, you just need to carefully look at every type and every operation. What would also help is having meaningful variable names. That way, it is much easier to understand what the code is attempting to do.
|
73,747,119
| 73,748,158
|
What is the usecase of calling hana::is_valid with a nullary function?
|
Boost.Hana offers boost::hana::is_valid to check whether a SFINAE-friendly expression is valid.
You can use it like this
struct Person { std::string name; };
auto has_name = hana::is_valid([](auto&& p) -> decltype((void)p.name) { });
Person joe{"Joe"};
static_assert(has_name(joe), "");
static_assert(!has_name(1), "");
However, there's a note about the argument to is_valid being a nullary function:
To check whether calling a nullary function f is valid, one should use the is_valid(f)() syntax. […]
How can I even use it by passing to it a nullary function? I mean, if a function is nullary, then how is its body gonna have any dependent context to which SFINAE can apply?
I think that maybe "lambda captures" might have something to do with the answer, but I can't really figure it out how.
|
The use case is checking that f is actually nullary, e.g.
if constexpr (is_valid(f)()) {
f(); // Treat f as a "no args function"
} else if constexpr (is_valid(f, arg1)) {
f(arg1);
}
What the documentation says is that, unlike for functions of non-zero arity, the is_valid predicate can only be invoked in the form:
is_valid(f)(); // Invoking "is_valid(f)", i.e. no parentheses, does NOT
// check that f is a nullary function.
reminder: to check whether e.g. a 2 arguments function call is valid you can say:
is_valid(f, arg1, arg2);
// or
is_valid(f)(arg1, arg2)
Take for example the following Demo
void f0()
{
std::cout << "Ok f0\n";
}
void f1(int)
{
std::cout << "Ok f1\n";
}
template <class F>
void test(F fun)
{
if constexpr (hana::is_valid(fun)())
{
fun();
}
else if constexpr (hana::is_valid(fun, 2))
{
fun(2);
}
}
int main() {
test(f0);
test(f1);
}
It may be obvious, but you have to keep in mind that SFINAE does not happen ON f0 or f1. SFINAE happens in the guts of is_valid between the different flavors of is_valid_impl (comments mine):
// 1
template <
typename F, typename ...Args, typename = decltype(
std::declval<F&&>()(std::declval<Args&&>()...)
)>constexpr auto is_valid_impl(int) { return hana::true_c; }
// 2
template <typename F, typename ...Args>
constexpr auto is_valid_impl(...) { return hana::false_c; }
// Substitution error on 1 will trigger version 2
So your question
"I mean, if a function is nullary, then how is its body gonna have any dependent context to which SFINAE can apply?"
has little meaning, since SFINAE does not happen on the user-provided function. After all, we are setting up nothing to enable SFINAE on our functions. Implementing SFINAE requires providing more than one candidate that "guides" the instantiation process (check SFINAE sono buoni).
The term "SFINAE friendly" here, has to do with f (our function) being usable as type parameter for the SFINAE implementation in is_valid. For example, if in our Demo f was overloaded (replace f1(int) with f0(int)) you would get a substitution error:
<source>:22:6: note: candidate: 'template<class F> void test(F)'
22 | void test(F fun)
| ^~~~
<source>:22:6: note: template argument deduction/substitution failed:
because when the compiler reaches the deepest point of is_valid_impl it tries to instantiate the favorable version (version 1, that doesn't have ... parameter) but it cannot tell what the type of F is and produces a hard error.
For reference, the way SFINAE works is that if it could use type F, the compiler would:
make an attempt on version 1
If successful, return true (so it IS valid)
If not successful, it would go for version 2 (substitution error is not a failure), which does not use type F as F(Args...) and hence produces a false.
|
73,747,877
| 73,748,082
|
Is what I'm doing an Insertion sort, I think the logic is correct but unconventional?
|
This code is supposed to be an Insertion sort but is it implemented as such? I'm lost. The first loop goes through the array and checks if the next element is smaller than the current element. The nested loop inserts the next element(j) correctly in its place in the sorted portion of the array.
#include <iostream>
using namespace std;
// Print array
void printArray(int array[], int arraySize)
{
for (int i = 0; i < arraySize; i++)
{
cout << array[i] << " ";
}
cout << endl;
}
int main()
{
int array1[] ={5, 3, 1, 9, 8, 2, 4, 7};
int array1Size = sizeof(array1)/sizeof(int);
printArray(array1, array1Size);
for (int i = 0; i < array1Size - 1; i++)
{
int oldNum = array1[i];
if (array1[i] > array1[i + 1])
{
array1[i] = array1[i + 1];
array1[i + 1] = oldNum;
}
int newI = array1[i];
// Check if arranged correctly
if ( i > 0)
{
// Swap bigger number and newI
for (int j = i - 1; newI < array1[j]; j--)
{
if (j < 0)
{
break;
}
array1[j + 1] = array1[j];
array1[j] = newI;
}
}
printArray(array1, array1Size);
}
return 0;
}
|
This for loop
for (int j = i - 1; newI < array1[j]; j--)
{
if (j < 0)
{
break;
}
array1[j + 1] = array1[j];
array1[j] = newI;
}
can invoke undefined behavior when j is equal to -1 due to this expression in the condition of the for loop
newI < array1[j]
And the code is too complicated. For example this code snippet
if (array1[i] > array1[i + 1])
{
array1[i] = array1[i + 1];
array1[i + 1] = oldNum;
}
where two elements are swapped is redundant. And this if statement
if ( i > 0)
{
also is redundant. It is enough to start the outer loop from 1 instead of from 0.
It is better to define a separate function. It can look for example the following way
void InsertionSort( int a[], size_t n )
{
for (size_t i = 1; i < n; i++)
{
if (a[i] < a[i - 1])
{
int tmp = a[i];
size_t j = i;
for ( ; j != 0 && tmp < a[j - 1]; --j )
{
a[j] = a[j - 1];
}
a[j] = tmp;
}
}
}
Pay attention to the fact that the operator sizeof yields a value of the type size_t. You should use the type size_t for the variable that will store the number of elements in the array. In general the type int is not large enough to store sizes of arrays.
If your compiler supports C++ 17 then instead of using the expression with the sizeof operator
int array1Size = sizeof(array1)/sizeof(int);
you could write at least
#include <iterator>
//...
int array1Size = std::size( array1 );
Also, as the function printArray does not change the passed array, its first parameter should be declared with the qualifier const.
void printArray(const int array[], int arraySize);
Here is a demonstration program that shows usage of a separate function that sorts arrays using the insertion sort method.
#include <iostream>
#include <iterator>
void InsertionSort( int a[], size_t n )
{
for (size_t i = 1; i < n; i++)
{
if (a[i] < a[i - 1])
{
int tmp = a[i];
size_t j = i;
for ( ; j != 0 && tmp < a[j - 1]; --j )
{
a[j] = a[j - 1];
}
a[j] = tmp;
}
}
}
int main()
{
int array1[] ={5, 3, 1, 9, 8, 2, 4, 7};
for ( const auto &item : array1 )
{
std::cout << item << ' ';
}
std::cout << '\n';
InsertionSort( array1, std::size( array1 ) );
for ( const auto &item : array1 )
{
std::cout << item << ' ';
}
std::cout << '\n';
}
The program output is
5 3 1 9 8 2 4 7
1 2 3 4 5 7 8 9
|
73,748,757
| 73,748,893
|
How to delete a specific value from a doubly linked list?
|
I was given a task with a DOUBLY linked list to delete a specific number from the list. My code is giving an Access Violation error. Even after multiple dry runs, I can't figure out what is wrong. The task basically is to create a search function which finds a specific number in the linked list, and a deletion function which deletes that specific link.
node* search(int val){
node* cur=head;
while(cur!=NULL){
if(cur->data==val){
cout<<"value found "<<val<<endl;
return cur;
}
cur=cur->next;
}
cout<<"value not exist"<<endl;
return NULL;
}
bool delspval(int val){
node*temp=0;
if(search(val)==NULL){
return 0;
}
else{
temp=search(val);
temp->prev->next=temp->next;
delete temp;
temp=0;
cout<<"specific value "<<val<<" deleted"<<endl;
return 1;
}
}
In the above given code, the line temp->prev->next=temp->next; is giving the error. I'm pretty much a beginner at linked lists, so any help would be appreciated.
minimal working code:
#include<iostream>
using namespace std;
class dll{
struct node{
int data;
node *next,*prev;
};
node *head;
public:
dll(){
head=NULL;
}
void inatst(int val){
node *temp=new node;
temp->data=val;
temp->next=head;
head=temp;
}
node* search(int val){
node* cur=head;
while(cur!=NULL){
if(cur->data==val){
cout<<"value found "<<val<<endl;
return cur;
}
cur=cur->next;
}
cout<<"value not exist"<<endl;
return NULL;
}
bool delspval(int val){
node*temp=0;
if(search(val)==NULL){
return 0;
}
else{
temp=search(val);
temp->prev->next=temp->next;
delete temp;
temp=0;
cout<<"specific value "<<val<<" deleted"<<endl;
return 1;
}
}
void display(){
node*cur=head;
while(cur!=NULL){
cout<<cur->data<<" ";
cur=cur->next;
}
cout<<endl;
}
~dll(){
while(head!=NULL){
node*cur=head;
head=cur->next;
delete cur;
cur=head;
}
}
};
void main(){
dll l1;
l1.inatst(1);
l1.inatst(2);
l1.inatst(3);
l1.inatst(4);
l1.inatst(5);
l1.inatst(6);
l1.display();
l1.delspval(3);
system("pause");
}
|
For starters, the search() function is being called twice within the delspval() function:
if(search(val)==NULL){
and
temp=search(val);
that makes the delspval() function less efficient.
This statement:
temp->prev->next=temp->next;
is not sufficient on its own: it does not update the prev pointer of the following node, and it does not handle the case where the deleted node is the head.
The delspval() function can be defined in the following way. I suppose that the class contains only one pointer to the head node. If the class contains also a pointer to the tail node, then the function below must be modified.
bool delspval( int val )
{
node *temp = search( val );
bool success = temp != nullptr;
if ( success )
{
if ( temp->next != nullptr )
{
temp->next->prev = temp->prev;
}
// If the class has a pointer to the tail node
// then uncomment the else part
/*
else
{
tail = temp->prev;
}
*/
if ( temp->prev != nullptr )
{
temp->prev->next = temp->next;
}
else
{
head = temp->next;
}
delete temp;
}
return success;
}
|
73,748,978
| 73,749,110
|
Compiler disagreement on using std::vector in constexpr context
|
The following code compiles with gcc and MSVC, but not with clang.
#include <array>
#include <vector>
consteval void foo(auto func) {
std::array<int, func().size()> f;
}
int main() {
foo([](){ return std::vector<int>{1,2,3,4,5};});
}
Compiler Explorer
If I understand the rules of dynamic memory allocation in constant expressions correctly, this should be allowed because the memory is deallocated immediately. Is this a bug in clang? Or even undefined behaviour?
|
It is just a bug in Clang. It seems to not consider the deallocations happening at the end of expressions as template arguments as part of the constant (full-)expression. When using a constexpr variable to store the size instead of a template argument, Clang accepts it as well.
A simplified test case (not depending on std::vector constexpr support):
struct V {
int* v = new int[10];
constexpr ~V() { delete[] v; }
constexpr int size() { return 10; }
};
template<auto>
struct A {};
int main() {
constexpr auto x = V{}.size(); //1
using T = A<V{}.size()>; //2
}
Clang accepts //1, but not //2.
On a quick look at https://github.com/llvm/llvm-project/issues I couldn't find a matching issue, so it might make sense to report it.
|
73,749,071
| 73,749,072
|
What is the advantage of Hana's type_c-and-declval dance when querying whether a SFINAE-friendly expression is valid?
|
On the one hand, the function boost::hana::is_valid is presented as follows
Checks whether a SFINAE-friendly expression is valid.
Given a SFINAE-friendly function, is_valid returns whether the function call is valid with the given arguments. Specifically, given a function f and arguments args...,
is_valid(f, args...) == whether f(args...) is valid
The result is returned as a compile-time Logical.
and an example of the usage accompanies it (from the same linked page):
struct Person { std::string name; };
auto has_name = hana::is_valid([](auto&& p) -> decltype((void)p.name) { });
Person joe{"Joe"};
static_assert(has_name(joe), "");
static_assert(!has_name(1), "");
where we see that the lambda fed to is_valid is in turn fed with the actual object that we feed to has_name.
On the other hand, the book C++ Templates - The Complete Guide presents a very similar solution (and indeed the authors cite Boost.Hana and Louis Dionne), the details of which I omit for now. This solution, however, is used in a slightly different way:
constexpr auto hasFirst = isValid([](auto x) -> decltype((void)valueT(x).first) {});
static_assert(!hasFirst(type<int>));
struct S { int first; };
static_assert(hasFirst(type<S>));
The above assumes the existence of valueT and type defined/declared below
template<typename T>
struct TypeT {
using Type = T;
};
template<typename T>
constexpr auto type = TypeT<T>{};
template<typename T>
T valueT(TypeT<T>);
Now, if I understand correctly, valueT and type correspond roughly to boost::hana::traits::declval and boost::hana::type_c, so the example from the book should map to the following
constexpr auto hasFirst = is_valid([](auto x) -> decltype((void)traits::declval(x).first) {});
static_assert(!hasFirst(hana::type_c<int>));
struct S { int first; };
static_assert(hasFirst(hana::type_c<S>));
But what is the advantage of this?
From this answer by Louis Dionne I initially understood that it's a matter of taste, but then I thought that might be the case only for that specific scenario and not in general.
|
While writing the question, I've searched more an more (to put relevant links in it, mainly), and I eventually did find the answer in the documentation of Boost.Hana at Boost.Hana > User Manual > Introspection > Checking expression validity > Non-static members: the use of hana::type_c to wrap a type T in an object (not of type T, but of type hana::type<T>!) and hana::declval to unwrap it is useful to write those type traits when there's no object around.
|
73,749,074
| 73,749,104
|
trouble figuring out for-each iterator interface in C++ with MSVC C++17
|
I've had trouble compiling iterator logic in MSVC. It results in a compilation error when trying to express an iteration using the short-hand for (type element: container) { ... }
It could be a duplicate, but I am unaware of what keywords to search for.
Using MSVC++ 2019, C++ 17 compilation mode
/// iterator
template <typename T>
struct iter2 {
T *start;
size_t index;
///
void operator++() { index++; }
void operator--() { index--; }
///
operator T& () const { return start[index]; }
///
bool operator==(iter2 b) const { return start == b.start && index == b.index; }
bool operator!=(iter2 b) const { return start != b.start || index != b.index; }
/// defined ctr
iter2(T *start, size_t index) : start(start), index(index) { }
};
// a trivial structure.
struct field2 { int key, val; };
///
template <typename T>
struct array2 {
/// just a simple C++ array
T buffer[100];
int count;
iter2<T> begin() { return iter2<T>(buffer, 0); };
iter2<T> end() { return iter2<T>(buffer, count); };
};
static bool exists(int k) {
/// populate an array with some copied data
field2 f1 = {}, f2 = {};
array2<field2> a;
a.buffer[0] = f1;
a.buffer[1] = f2;
a.count = 2;
/// iterate through as the shorthand for (type element:container) iterate 'should' ?
/// compiles on msvc 2019, c++ 17
for (iter2<field2> it = a.begin(), e = a.end(); it != e; ++it) {
field2 &f = (field2 &)it;
if (f.key == k)
return true;
}
/// does not compile, and says:
/// (...,26): error C2100: illegal indirection
for (field2 &f: a) {
if (f.key == k)
return true;
}
return null;
}
int main(int argc, char **argv) {
exists(0);
return 0;
}
|
Your iter2 type does not satisfy the requirements of an input iterator, as it does not implement operator*, which a range-for loop uses to access the elements that are being iterated.
You need to add this to iter2:
T& operator*() { return start[index]; }
And then consider removing operator T&(), as that is not an operator that an iterator should or need to implement.
And then, in exists(), you can change your non-range for loop to use field2 &f = *it; instead.
On a side note, your pre-increment/decrement operators should return a reference to the iterator that is being updated, eg:
iter2& operator++() { ++index; return *this; }
iter2& operator--() { --index; return *this; }
And, consider adding post-increment/decrement operators, too:
iter2 operator++(int) { iter2 temp(*this); ++index; return temp; }
iter2 operator--(int) { iter2 temp(*this); --index; return temp; }
You might also consider adding an operator->, too:
T* operator->() { return &start[index]; }
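Putting those pieces together, a minimal consolidated sketch of iter2 (my own assembly of the changes suggested above) could look like this:
#include <cstddef>
#include <iostream>
template <typename T>
struct iter2 {
    T*          start;
    std::size_t index;
    iter2(T* start, std::size_t index) : start(start), index(index) {}
    T& operator*()  { return start[index]; }           // what range-for uses
    T* operator->() { return &start[index]; }
    iter2& operator++() { ++index; return *this; }
    iter2  operator++(int) { iter2 tmp(*this); ++index; return tmp; }
    bool operator==(const iter2& b) const { return start == b.start && index == b.index; }
    bool operator!=(const iter2& b) const { return !(*this == b); }
};
int main() {
    int data[] = {1, 2, 3};
    for (iter2<int> it(data, 0), e(data, 3); it != e; ++it)
        std::cout << *it << ' ';   // 1 2 3
}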
|
73,749,178
| 73,749,534
|
How do move-only iterator implement postfix ++ operator?
|
What is the right way to implement an iterator that iterates over a Recordset provided below in C++ style?
class Recordset
{
public:
Recordset(const Recordset&) = delete;
Recordset& operator = (const Recordset&) = delete;
Recordset(Recordset&& other) noexcept = default;
Recordset& operator = (Recordset&&) = default;
//Moves to the next record. Returns false if the end is reached.
bool Next();
//Gets the current record as an instance of type T.
template <class T>
void Get(T& val);
};
my idea is that I probably do something like this:
template <class T>
class Iterator
{
public:
using iterator_category = std::forward_iterator_tag;
using value_type = T;
using difference_type = std::ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
Iterator() = default;
Iterator(Recordset s) : m_i(std::move(s))
{
try_next();
}
Iterator(const Iterator&) = delete;
Iterator& operator = (const Iterator&) = delete;
Iterator(Iterator&& other) = default;
Iterator& operator = (Iterator&& other) = default;
T* operator-> () { return cur(); }
T* operator* () { return cur(); }
bool operator== (const Iterator& other) const noexcept
{
//They both are end().
return !m_v && !other.m_v;
}
bool operator!= (const Iterator& other) const noexcept
{
return !operator==(other);
}
Iterator& operator++ ()
{
this->try_next();
return *this;
}
Iterator operator++ (int)
{
Iterator tmp = *this; //would not compile.
this->try_next();
return tmp;
}
private:
bool try_next()
{
if (m_i.Next())
{
T val;
m_i.Get(val);
m_v = val;
return true;
}
return false;
}
T* cur()
{
T& val = *m_v;
return &val;
}
Recordset m_i;
std::optional<T> m_v;
};
template <class T>
std::ranges::subrange<Iterator<T>> make_range(Recordset& s)
{
return std::ranges::subrange(Iterator<T>(s), Iterator<T>{});
}
and use it as follows:
struct Record { int x; std::string y; };
int main()
{
Recordset s;
for (Record& r : make_range(s))
{
std::cout << r.x << r.y << std::endl;
}
return 0;
}
The first question is how do I implement Iterator operator++ (int) if both Recordset and Iterator are move-only? (temp and this can't point to different records, because there is only one current record in the recordset). Does C++20 require it?
The second question: is it a good idea to implement end() in this way? (end() is simply an iterator containing an empty optional)
|
Single-pass move-only input iterators (a C++20 std::input_iterator) are only required to be weakly incrementable, where (void) ++i has the same effect as (void) i++. You can simply have void operator++(int) { ++*this; }. Older requirements for iterators (Cpp17InputIterator) require iterators to be copyable, and require operator++ to return that copy.
And for your second question, you might want to use a sentinel type, something like:
template<typename T>
bool operator==(const Iterator<T>& it, std::default_sentinel_t) {
return !it.m_v;
}
// != can be rewritten from ==, so no need to write one
template <class T>
auto make_range(Recordset& s)
{
return std::ranges::subrange(Iterator<T>(s), std::default_sentinel);
}
And if you need to work with an algorithm that can't use separate sentinel types, use ranges::common_view. Your current solution also works, except operator== needs to be this == &other || (!m_v && !other.m_v);.
|
73,749,515
| 73,751,194
|
How to copy a string to a unsigned char* on a struct?
|
a.h
struct loader_struct
{
unsigned char* loader_x64;
};
extern loader_struct* g;
a.cpp
#include "a.h"
loader_struct g_startup;
loader_struct* g = &g_startup;
b.cpp
#include "a.h"
int _tmain(int argc, _TCHAR* argv[])
{
std::string mdata = "abcdefg";
g->loader_x64 = new unsigned char[mdata.length()];
std::copy( mdata.begin(), mdata.end(), g->loader_x64 );
}
I'm trying to copy the content of mdata to loader_x64. It is being copied; however, it contains some rubbish at the end. What am I doing wrong?
|
You are getting "rubbish" because whatever code is reading from loader_x64 is expecting it to be null-terminated, but you are not actually null-terminating it, so the reader reaches past the end of the buffer, which is undefined behavior.
You need to null-terminate the loader_x64 buffer, eg:
#include "a.h"
int _tmain(int argc, _TCHAR* argv[])
{
std::string mdata = "abcdefg";
g->loader_x64 = new unsigned char[mdata.length()+1];
std::copy_n( mdata.c_str(), mdata.length()+1, g->loader_x64 );
...
delete[] g->loader_x64;
}
Alternatively, you can simply set loader_x64 to point directly at mdata's internal data, which is guaranteed to be null-terminated since C++11, eg:
#include "a.h"
int _tmain(int argc, _TCHAR* argv[])
{
std::string mdata = "abcdefg";
g->loader_x64 = reinterpret_cast<unsigned char*>(mdata.data()); // C++17 and later
or
g->loader_x64 = reinterpret_cast<unsigned char*>(const_cast<char*>(mdata.c_str())); // prior to C++17
...
}
|
73,749,527
| 73,749,766
|
Initialization order of static inline member variables in class templates (C++17)
|
I am working on a code where I need a static member variable of some class to be initialized using a static variable of an instance of a class template. I know about the static initialization order fiasco and found several discussions on that issue but none of them really helped me with my problem. I don't even know why that should be a problem in my case.
This is a minimal example that reproduces the error I get from my code:
#include <string>
#include <map>
template<class T>
class Foo {
private:
static inline std::map<std::string, T> map_ = {};
public:
static bool insert(const std::string& key, T value) {
map_[key] = value;
return true;
}
};
using MyFoo = Foo<char>;
class Bar {
static inline bool baz_ = MyFoo::insert("baz", 'A');
};
int main() {
// This works just fine if the definition of Bar::baz_ in line 24 is removed
//MyFoo::insert("baz", 'A');
return 0;
}
Compilation using the C++17 standard finishes with 0 warnings and 0 errors. When the program is executed, however, a segmentation fault occurs when Foo::insert is called. It appears that Foo::map_ is not initialized at that point. But shouldn't the static variables be initialized in the same order as they are defined in the code?
I should also mention that the code without the template works fine. So I am wondering if the compiler instantiates the template in a way that the actual class is defined after Bar. Could something like that be the problem or does the compiler just happen to do the 'right' thing in that case?
|
Dynamic initialization of static data members of class template specializations (if they are not explicitly specialized) is completely unordered (indeterminately sequenced) with respect to any other dynamic initialization. It doesn't matter whether the member is inline or not, it doesn't matter whether the other dynamic initialization is also of a static data member of a class template specialization or not, and it also doesn't matter where the points of instantiation and definition of the static data member are located in the translation unit.
Therefore there is no way to guarantee that Foo<char>::map_ is initialized before Bar::baz_.
Instead use the common idiom of using a static member function containing the static member as a local static variable instead:
static auto& map_() {
static std::map<std::string, T> map = {};
return map;
}
// replace `map_` with `map_()` everywhere else
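For reference, a minimal self-contained sketch of Foo rewritten with that idiom (my own, keeping the insert interface from the question) could be:
#include <map>
#include <string>
template<class T>
class Foo {
private:
    // Function-local static: constructed on first use, so it is ready
    // before any caller can touch it, regardless of initialization order.
    static std::map<std::string, T>& map_() {
        static std::map<std::string, T> map;
        return map;
    }
public:
    static bool insert(const std::string& key, T value) {
        map_()[key] = value;
        return true;
    }
};
using MyFoo = Foo<char>;
class Bar {
    static inline bool baz_ = MyFoo::insert("baz", 'A');   // now safe
};
int main() {}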
|
73,749,575
| 73,749,622
|
Why doesn't decltype(*this)::value_type compile?
|
Why doesn't decltype(*this)::value_type compile? It shows an error message:
error: 'value_type' is not a member of 'const Foo<char>&'
So what exactly is the reason that decltype( *this )::value_type does not compile in the below program:
#include <iostream>
#include <vector>
#include <type_traits>
template <typename charT>
struct Foo
{
using value_type = charT;
std::vector<value_type> vec;
void print( ) const;
};
template <typename charT>
void Foo<charT>::print( ) const
{
using Foo_t = std::remove_reference_t<decltype( *this )>;
// `decltype( *this )::value_type` does not compile
if constexpr ( std::is_same_v< typename Foo_t::value_type,
decltype( std::cout )::char_type > )
{
// logic
}
else if constexpr ( std::is_same_v< typename Foo_t::value_type,
decltype( std::wcout )::char_type > )
{
// logic
}
else
{
static_assert( std::is_same_v< typename Foo_t::value_type,
decltype( std::cout )::char_type > ||
std::is_same_v< typename Foo_t::value_type,
decltype( std::wcout )::char_type >,
"character type not supported" );
}
}
int main( )
{
#define IS_CHAR 1
#if IS_CHAR == 1
using FooChar = Foo<char>;
FooChar foo;
foo.vec.resize( 10, '$' );
#else
using FooWideChar = Foo<wchar_t>;
FooWideChar foo;
foo.vec.resize( 10, L'#' );
#endif
foo.print( );
}
What is special about the this pointer? Why does removing the reference with std::remove_reference_t make it compile? Everything works in the above snippet. But if I replace typename Foo_t::value_type with the more readable decltype( *this )::value_type it won't compile. So I tried my luck by using std::remove_reference_t and managed to come up with the above less straightforward (and less intuitive) solution:
using Foo_t = std::remove_reference_t<decltype( *this )>;
// and then inside the std::is_same_v
std::is_same_v< typename Foo_t::value_type, /* etc */ >
// ...
I know that there are more concise alternatives e.g. std::is_same_v< Foo<charT>::value_type, or even std::is_same_v< charT, but I find the decltype approach more self-explanatory. Are there any other solutions?
|
The result of applying unary operator* to a pointer is a reference (lvalue ref) to the pointed at value, not a copy of the pointed at value. So decltype(*this) (or decltype(*foo) for any pointer type) will always be a reference type. There's nothing special about this.
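As a side note (my addition, assuming C++20 is available), std::remove_cvref_t spells the same idea a little more directly, since it also strips the const picked up inside the const member function. A stripped-down, self-contained sketch based on the question's Foo:
#include <type_traits>
#include <vector>
template <typename charT>
struct Foo
{
    using value_type = charT;
    std::vector<value_type> vec;
    void print() const
    {
        // decltype(*this) is const Foo<charT>&; remove_cvref_t yields plain Foo<charT>
        using Foo_t = std::remove_cvref_t<decltype(*this)>;
        static_assert(std::is_same_v<typename Foo_t::value_type, charT>);
    }
};
int main()
{
    Foo<char> f;
    f.print();
}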
|
73,750,633
| 73,750,708
|
How do I define a function that takes a variadic class template?
|
I am trying to define a simple variant-based Result type alias, sort of like a poor man's Rust-like Result type:
namespace detail {
template <typename SuccessType, typename... ErrorTypes>
struct Result {
using type = std::variant<SuccessType, ErrorTypes...>;
};
template <typename... ErrorTypes>
struct Result<void, ErrorTypes...> {
using type = std::variant<std::monostate, ErrorTypes...>;
};
} // namespace detail
template <typename SuccessType, typename... ErrorTypes>
using Result_t = detail::Result<SuccessType, ErrorTypes...>::type;
i.e. a Result_t is just an std::variant where the 0th index is the successful result and the rest are error structs.
I defined this helper method to check if the result is good:
template <typename SuccessType, typename... ErrorTypes>
inline bool Ok(const Result_t<SuccessType, ErrorTypes...>& r) {
return r.index() == 0;
}
But I get a "no matching overloaded function found" when I try to instantiate it:
error C2672: 'Ok': no matching overloaded function found
error C2783: 'bool Ok(const detail::Result<SuccessType,ErrorTypes...>::type &)': could not deduce template argument for 'SuccessType'
struct FileError {};
struct BadJson {};
template <typename T>
using Result = Result_t<T, FileError, BadJson>;
Result<void> GetVoid() { return {}; }
TEST(ConfigFileTest, Result) {
auto res = GetVoid();
EXPECT_EQ(res.index(), 0);
bool ok = Ok(res);
EXPECT_TRUE(ok);
}
What am I doing wrong? If I just have Ok be templated like template <typename T> Ok(const T& r) it works, but makes the function too general.
|
After expanding the Result_t alias in the function parameter, it looks like this:
template <typename SuccessType, typename... ErrorTypes>
bool Ok(const detail::Result<SuccessType, ErrorTypes...>::type& r) {
return r.index() == 0;
}
The problematic part here is that the template parameters are left of the name resolution operator ::. Everything left of :: is a non-deduced context, meaning that it is not used to deduce template arguments. So since SuccessType and ErrorTypes... appear only in non-deduced contexts, they cannot be deduced, and a call which doesn't explicitly specify them will fail.
You can see that this rule is necessary, because theoretically any specialization of detail::Result<SuccessType, ErrorTypes...> could have a ::type that matches the argument's type. There is no way that the compiler can check this for every possible combination of types.
Instead of trying to alias types, make Result an actual new type:
template <typename SuccessType, typename... ErrorTypes>
struct Result {
using variant_type = std::variant<SuccessType, ErrorTypes...>;
variant_type variant;
};
template <typename... ErrorTypes>
struct Result<void, ErrorTypes...> {
using variant_type = std::variant<std::monostate, ErrorTypes...>;
variant_type variant;
};
template <typename SuccessType, typename... ErrorTypes>
bool Ok(const Result<SuccessType, ErrorTypes...>& r) {
return r.variant.index() == 0;
}
or something along those lines. If you really want to use the old design using only aliases, then the function should not take the nested alias as argument, but the actual type instead (which probably does not match the intent of the design):
template <typename T, typename... ErrorTypes>
bool Ok(const std::variant<T, ErrorTypes...>& r) {
return r.index() == 0;
}
(I removed the inline on the templates. inline on a function template doesn't really make much sense.)
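A self-contained usage sketch (my own, mirroring the question's test case and assuming the new-type Result above):
#include <variant>
template <typename SuccessType, typename... ErrorTypes>
struct Result {
    std::variant<SuccessType, ErrorTypes...> variant;
};
template <typename... ErrorTypes>
struct Result<void, ErrorTypes...> {
    std::variant<std::monostate, ErrorTypes...> variant;
};
template <typename SuccessType, typename... ErrorTypes>
bool Ok(const Result<SuccessType, ErrorTypes...>& r) {
    return r.variant.index() == 0;
}
struct FileError {};
struct BadJson {};
Result<void, FileError, BadJson> GetVoid() { return {}; }
int main() {
    auto res = GetVoid();
    bool ok = Ok(res);   // SuccessType and ErrorTypes... are deduced from Result<...>
    return ok ? 0 : 1;
}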
|
73,751,476
| 73,751,613
|
capture compiling error using python subprocess
|
Using Python to script running GoogleTest with subprocess.
My code looks like
import subprocess
import logging
logging.basicConfig(filename="gtest.log",level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
output = subprocess.check_output("g++ test.cpp -lgtest -lgtest_main -lpthread -I <some header file path>",shell = True)
logging.info(output.decode("utf-8"))
The log file only contains the executable output after compiling is a success. Sometimes, there are missing header files and the compiler will throw an error, for example:
fatal error: profile.h: No such file or directory
However, this will not be captured in the log.
My understanding is that, since the compilation failed, the output variable is never created. subprocess just executes the command and moves on to the next statement when the previous one is finished. Python cannot catch the error generated by g++.
Am I thinking it correctly? and if I want to capture the g++ compiling error into the log, what should I do?
|
Refer to the comment from @Miles Budnek:
g++ prints errors to stderr, so you need to capture that as well as stdout. Add stderr=subprocess.STDOUT to your call to capture it as well.
Note also that check_output raises subprocess.CalledProcessError when the compiler exits with a non-zero status, which is why output is never assigned on a failed build; the captured text is then available on the exception's output attribute.
|
73,751,891
| 73,751,909
|
Public class member not visible when CRTP derived type is a template class
|
The below code doesn't compile.
I want Derived<T> to access the m_vec member of Base. However, because Derived<T> is templated, it implements CRTP via : public Base<Derived<T>>, and m_vec is not visible.
If I change Derived<T> to just Derived, m_vec becomes visible.
Why is this/is there a workaround?
#include <vector>
template<class SUB_CLASS>
struct Base
{
Base(int config){}
std::vector<std::string> m_vec;
};
template<class T>
struct Derived : public Base<Derived<T>>
{
Derived(int config) : Base<Derived<T>>(config){}
void start()
{
m_vec.clear(); // This line doesn't compile. m_vec is not accessible
}
};
int main()
{
int config = 0;
Derived<int> d(config);
d.start();
return 0;
}
|
Access the member using
this->m_vec.clear();
That should compile. The reason the unqualified name fails is that Base<Derived<T>> is a dependent base class, and unqualified names are not looked up in dependent base classes; writing this->m_vec (or Base<Derived<T>>::m_vec) makes the name dependent, so lookup is deferred to instantiation, where m_vec is found.
|
73,752,011
| 73,752,467
|
How to remove button in IMGui and prevent settings window from closing?
|
I was using the docking branch code of ImGui and created an Application.cpp and Application.h in example_win32_directx9. I am compiling in Release x64. I have set SubSystem (Linker > System) to Windows. The code of Application.cpp is:
#include "Application.h"
#include "imgui.h"
namespace MyApp
{
void RenderUI()
{
bool Decimal_places = false;
bool settings = false;
ImGui::Begin("Main");
if (ImGui::Button("Settings")) {
settings = true;
}
else {
settings = false;
}
ImGui::End();
if (settings==true) {
ImGui::Begin("Settings");
if (Decimal_places == false) {
if (ImGui::Button("Off")) {
Decimal_places = true;
}
}
ImGui::End();
}
//static float value = 0.0f;
//ImGui::DragFloat("Value", &value);
}
}
Code of Application.h:
#pragma once
namespace MyApp
{
void RenderUI();
}
It works somewhat fine, but I do not know how to remove the "Off" button or change it from "Off" to "On", and I don't understand why the Settings window closes. I have already included the header file in main.cpp and I am calling its function from main.cpp.
|
To fix it, the bools need to be declared outside the function; otherwise they are reset to false on every frame RenderUI runs. After editing, the fixed Application.cpp with more functionality was:
#include "Application.h"
#include "imgui.h"
namespace MyApp
{
bool settings = false;
bool p_open = true;
bool Decimal_places = false;
void RenderUI()
{
ImGui::Begin("Main");
if (ImGui::Button("Settings")) {
settings = true;
}
ImGui::End();
if (settings == true) {
if (p_open == true) {
if (!ImGui::Begin("Settings", &p_open)) {
ImGui::End();
}
else {
if (Decimal_places == false) {
if (ImGui::Button("Off")) {
Decimal_places = true;
}
}
else if (Decimal_places == true) {
if (ImGui::Button("On")) {
Decimal_places = false;
}
}
ImGui::End();
}
}
else {
p_open = true;
settings = false;
}
}
}
}
|
73,752,168
| 73,753,282
|
Get USB detail info from PDEV_BROADCAST_DEVICEINTERFACE in Windows
|
case WM_DEVICECHANGE:
{
PDEV_BROADCAST_HDR lpdb = (PDEV_BROADCAST_HDR)lparam;
PDEV_BROADCAST_DEVICEINTERFACE lpdbv = (PDEV_BROADCAST_DEVICEINTERFACE)lpdb;
std::string path;
if (lpdb->dbch_devicetype == DBT_DEVTYP_DEVICEINTERFACE)
{
path = std::string(lpdbv->dbcc_name);
switch (wparam)
{
case DBT_DEVICEARRIVAL:
std::cout << "new device connected: " << path << "\n";
break;
case DBT_DEVICEREMOVECOMPLETE:
std::cout << "device disconnected: " << path << "\n";
break;
}
}
break;
}
I write a small Windows console app to detect usb connect/disconnect event by listen WM_DEVICECHANGE message.
How can I get more info about which USB device is connected/disconnected from the PDEV_BROADCAST_DEVICEINTERFACE data (name, description, storage size, manufacturer, ...)?
|
The SetupAPI, specifically SetupDiGetClassDevs+SetupDiEnumDeviceInfo+SetupDiGetDeviceInstanceId+SetupDiGetDeviceRegistryProperty should get you started.
More information and a sample can be found here...
|
73,752,176
| 73,765,914
|
"Undefined symbols for architecture arm64" building basic SFML project on M1 mac with g++-12
|
I've been having some trouble trying to compile a very basic SFML project on an M1 Mac. The exact error I've been getting is as follows:
Undefined symbols for architecture arm64: "__ZN2sf6StringC1EPKcRKSt6locale", referenced from: _main in cczblZnn.o ld: symbol(s) not found for architecture arm64 collect2: error: ld returned 1 exit status
when running
g++-12 -std=c++20 src/main.cpp -I/Users/rooster/SFML/include -o Ghost -L/Users/rooster/SFML/build/lib -lsfml-audio -lsfml-network -lsfml-graphics -lsfml-window -lsfml-system
So far, nothing I've tried has worked; I've already compiled the libs myself from source using CMake and I've rearranged the order of the libraries in the build command, among other seemingly inconsequential tweaks in VSCode.
Here's what I'm using:
VSCode
g++-12
library files compiled by me with cmake (using unix makefiles)
c++20
SFML 2.5.1_2
And here's my main.cpp:
#include <SFML/Graphics.hpp>
int main()
{
sf::RenderWindow window(sf::VideoMode(sf::Vector2u(200, 200)), "SFML");
sf::CircleShape shape(100.f);
shape.setFillColor(sf::Color::Green);
while (window.isOpen())
{
sf::Event event;
while (window.pollEvent(event))
{
if (event.type == sf::Event::Closed)
window.close();
}
window.clear();
window.draw(shape);
window.display();
}
return 0;
}
I'm relatively confident that I'm making some simple mistake. Any help is greatly appreciated, and thank you for your time!
|
Fixed by scrubbing all references to SFML from my computer, installing it again with brew, and then using clang++ instead of compiling with g++ :)
|
73,752,190
| 73,752,556
|
how to update the elements of an array with the sum of previous two and next two elements?
|
How do I update the elements of an array with the sum of the previous two and next two elements? For the first element the sum would be the sum of the next two elements, as there is no previous element, and the same goes for the last element.
For example, given an array {1,2,3}, the array will be updated as {5,4,3}.
Explanation: for 1 there is no previous element, so it will be updated as 2+3=5; for 2 there is only 1 previous and only 1 next element, so it will be updated as 1+3=4; similarly for 3 it will be 1+2=3.
I tried doing this with if-else statements, but that seems too confusing and lengthy. Is there any other way to solve this?
for(int i=0;i<n;i++){
if(i==0){
sum=arr[1]+arr[2];
}
if(i==1){
sum=arr[0]+arr[2]+arr[3];
}
if(i==n-1){
sum=arr[n-2]+arr[n-3];
}
if(i==n-2){
sum=arr[n-1]+arr[n-3]+arr[n-4];
}
}
The above code does not work for n==3 because the element at i==1 is the same as the one at n-2; also, this code is lengthy. How should I solve this?
|
You need to create a temporary array to store the initial value of arr to prevent calculating the new value of arr[i] using new values (post-update) of arr[i - 1], arr[i - 2], etc.
std::vector<int> initial_value(arr, arr + n);
for (int i = 0; i < n; ++i) {
int updated_value = 0;
if (i - 2 >= 0) {
updated_value += initial_value[i - 2];
}
if (i - 1 >= 0) {
updated_value += initial_value[i - 1];
}
if (i + 1 < n) {
updated_value += initial_value[i + 1];
}
if (i + 2 < n) {
updated_value += initial_value[i + 2];
}
arr[i] = updated_value;
}
|
73,752,533
| 73,752,699
|
Taking in a 2D vector of a string from a user but ends up getting segmentation fault
|
I am trying to input a 2D vector of strings "t" times and make a grid of 2 * 2 size "t" times using C++. The inputs can be integers 0 to 8 (inclusive) and ".", so I tried using a 2D vector of strings, but I am getting a segmentation fault when accessing any element of a row > 0. I think it's because cin is buffered. Can anyone suggest how to debug this or another method to use?
here's the code inside main
int t;
cin>>t;
while(t--)
{
vector<vector<string> > grid(2);
vector<string> temp(2);
// vector<vector<char> > grid(2);
// vector<char> temp(2);
for(int i = 0; i < 2; i++)
{
for(int j = 0; j < 2; j++)
{
cout<<j<<" "; //loop is running when *enter* is pressed
cin>>temp[j];
// cin.get(temp[j]);
}
grid[i] = temp;
temp.clear();
}
cout<<"t-> "<<t<<" "<< grid[1][0]<<endl; //getting segmentation fault here
}
Edit: if I put the vector<string> temp(2) inside the first loop then it works, but I don't know why, because I did use clear(), so I don't understand why this is the case.
|
No, nothing to do with cin being buffered.
The error is here
temp.clear();
That line changes the size of temp to be zero, so on the next input cin >> temp[j]; you have a vector subscript error because temp has zero size.
Just remove the line temp.clear(); and your code will work.
|
73,752,664
| 73,842,198
|
GLFW closing window automatically
|
I am trying to create a simple program using OpenGL
I have set up some key callbacks which is triggered everytime I run the code
The main loop is
while (glfwWindowShouldClose(window) == 0)
{
renderGL();
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwTerminate();
return 0;
and the key_callback function has the line
if (key == GLFW_KEY_Q && action == GLFW_PRESS){
glfwSetWindowShouldClose(window, GL_TRUE);
}
The code works fine without it, but if I close the window by pressing Q and then run the program again, this block of code is executed immediately and the window closes.
To stop that from happening, if I remove the block once and run, then afterwards it starts working fine again.
What might be the problem here?
|
https://github.com/microsoft/wslg/issues/207
This is a known issue in WSL in which buffered input from the previous run is played back when the program runs again.
|
73,752,693
| 73,752,802
|
Any way to prevent/limit user input
|
I am creating a simple number memorising game which can be played on the command line/console. Each round there is one more digit in the number you need to memorise. Then you have to enter it; if it's correct you get some points and the game continues.
There is a problem where the user can input/type while the number you have to memorise is being displayed.
Is there any way to stop or clear user input?
Here is my game code:
#include<bits/stdc++.h>
using namespace std;
bool correct=true;
long long score,r=1,num,ans;
int rng(){
srand(time(NULL));
int rn=rand()%10;
return rn;
}
void checkans(int ans){
if(ans==num)score+=r*100;
else correct=false;
}
void displaytext(string t,double delay,bool end){
int i=0;
while (t[i]!='\0'){
cout<<t[i];
usleep(1000000*delay);
i++;
}
if(end)cout<<endl;
}
int main(){
displaytext("Hello!",0.1,false);
usleep(1000000);
system("CLS");
displaytext("Welcome to Just a Number Memorising Game!",0.1,false);
usleep(1000000);
system("CLS");
usleep(1000000);
displaytext("Let's begin!",0.1,false);
usleep(1000000);
system("CLS");
displaytext("[SCORE]: ",0.1,false);
displaytext(to_string(score),0.05,true);
for(int i=1;i<=to_string(score).size()+9;i++)displaytext("-",0.05,false);
while(correct){
system("CLS");
num=num*10+rng();
cout<<"[SCORE]: ";
displaytext(to_string(score),0.05,true);
for(int i=1;i<=to_string(score).size()+9;i++)cout<<"-";
displaytext("-",0.05,true);
displaytext("MEMORISE THIS --> ",0.05,false);
displaytext(to_string(num),0.2,false);
usleep(2000000);
system("CLS");
cout<<"[SCORE]: "<<score<<endl;
for(int i=1;i<=to_string(score).size()+9;i++)cout<<"-";
cout<<"-"<<endl;
displaytext("Enter what you memorised: ",0.05,false);
cin>>ans;
checkans(ans);
r++;
usleep(500000);
system("CLS");
}
displaytext("[GAME OVER]",0.2,true);
usleep(500000);
displaytext("[SCORE]: ",0.1,false);
displaytext(to_string(score),0.05,true);
usleep(10000000);
system("CLS");
return 0;
}
|
Since C++ cannot know on which platform or terminal it is running, it cannot have native support for any particular terminal. Hence, you will not find such functionality in pure C++.
Your usage of the function usleep suggests that you may be working on a Linux-compatible platform.
Here, a very often used solution is the ncurses library. This will give you everything that you need.
And it may already be installed on your machine. Please check.
|