/**************************************************************************/
/* scene_multiplayer.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifndef SCENE_MULTIPLAYER_H
#define SCENE_MULTIPLAYER_H
#include "scene_cache_interface.h"
#include "scene_replication_interface.h"
#include "scene_rpc_interface.h"
#include "scene/main/multiplayer_api.h"
class OfflineMultiplayerPeer : public MultiplayerPeer {
GDCLASS(OfflineMultiplayerPeer, MultiplayerPeer);
public:
virtual int get_available_packet_count() const override { return 0; }
virtual Error get_packet(const uint8_t **r_buffer, int &r_buffer_size) override {
*r_buffer = nullptr;
r_buffer_size = 0;
return OK;
}
virtual Error put_packet(const uint8_t *p_buffer, int p_buffer_size) override { return OK; }
virtual int get_max_packet_size() const override { return 0; }
virtual void set_target_peer(int p_peer_id) override {}
virtual int get_packet_peer() const override { return 0; }
virtual TransferMode get_packet_mode() const override { return TRANSFER_MODE_RELIABLE; }
virtual int get_packet_channel() const override { return 0; }
virtual void disconnect_peer(int p_peer, bool p_force = false) override {}
virtual bool is_server() const override { return true; }
virtual void poll() override {}
virtual void close() override {}
virtual int get_unique_id() const override { return TARGET_PEER_SERVER; }
virtual ConnectionStatus get_connection_status() const override { return CONNECTION_CONNECTED; }
};
class SceneMultiplayer : public MultiplayerAPI {
GDCLASS(SceneMultiplayer, MultiplayerAPI);
public:
enum NetworkCommands {
NETWORK_COMMAND_REMOTE_CALL = 0,
NETWORK_COMMAND_SIMPLIFY_PATH,
NETWORK_COMMAND_CONFIRM_PATH,
NETWORK_COMMAND_RAW,
NETWORK_COMMAND_SPAWN,
NETWORK_COMMAND_DESPAWN,
NETWORK_COMMAND_SYNC,
NETWORK_COMMAND_SYS,
};
enum SysCommands {
SYS_COMMAND_AUTH,
SYS_COMMAND_ADD_PEER,
SYS_COMMAND_DEL_PEER,
SYS_COMMAND_RELAY,
};
enum {
SYS_CMD_SIZE = 6, // Command + sys command + peer_id (+ optional payload).
};
// For each command, the 4 most significant bits can contain custom flags, as defined by subsystems.
enum {
CMD_FLAG_0_SHIFT = 4,
CMD_FLAG_1_SHIFT = 5,
CMD_FLAG_2_SHIFT = 6,
CMD_FLAG_3_SHIFT = 7,
};
// This is the mask that will be used to extract the command.
enum {
CMD_MASK = 7, // 0x7 -> 0b00000111
};
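// Example (sketch): given the first byte `b` of an incoming packet, the
// command and a custom flag can be extracted under this layout as:
//   uint8_t command = b & CMD_MASK; // Low 3 bits: a NetworkCommands value.
//   bool flag_0 = (b >> CMD_FLAG_0_SHIFT) & 1; // Subsystem-defined flag bit.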
private:
struct PendingPeer {
bool local = false;
bool remote = false;
uint64_t time = 0;
};
Ref<MultiplayerPeer> multiplayer_peer;
MultiplayerPeer::ConnectionStatus last_connection_status = MultiplayerPeer::CONNECTION_DISCONNECTED;
HashMap<int, PendingPeer> pending_peers; // Peers that have connected but not yet completed authentication.
Callable auth_callback;
uint64_t auth_timeout = 3000;
HashSet<int> connected_peers;
int remote_sender_id = 0;
int remote_sender_override = 0;
Vector<uint8_t> packet_cache;
NodePath root_path;
bool allow_object_decoding = false;
bool server_relay = true;
Ref<StreamPeerBuffer> relay_buffer;
Ref<SceneCacheInterface> cache;
Ref<SceneReplicationInterface> replicator;
Ref<SceneRPCInterface> rpc;
#ifdef DEBUG_ENABLED
_FORCE_INLINE_ void _profile_bandwidth(const String &p_what, int p_value);
_FORCE_INLINE_ Error _send(const uint8_t *p_packet, int p_packet_len); // Also profiles.
#else
_FORCE_INLINE_ Error _send(const uint8_t *p_packet, int p_packet_len) {
return multiplayer_peer->put_packet(p_packet, p_packet_len);
}
#endif
protected:
static void _bind_methods();
void _process_packet(int p_from, const uint8_t *p_packet, int p_packet_len);
void _process_raw(int p_from, const uint8_t *p_packet, int p_packet_len);
void _process_sys(int p_from, const uint8_t *p_packet, int p_packet_len, MultiplayerPeer::TransferMode p_mode, int p_channel);
void _add_peer(int p_id);
void _admit_peer(int p_id);
void _del_peer(int p_id);
void _update_status();
public:
virtual void set_multiplayer_peer(const Ref<MultiplayerPeer> &p_peer) override;
virtual Ref<MultiplayerPeer> get_multiplayer_peer() override;
virtual Error poll() override;
virtual int get_unique_id() override;
virtual Vector<int> get_peer_ids() override;
virtual int get_remote_sender_id() override { return remote_sender_override ? remote_sender_override : remote_sender_id; }
virtual Error rpcp(Object *p_obj, int p_peer_id, const StringName &p_method, const Variant **p_arg, int p_argcount) override;
virtual Error object_configuration_add(Object *p_obj, Variant p_config) override;
virtual Error object_configuration_remove(Object *p_obj, Variant p_config) override;
void clear();
// Usually from object_configuration_add/remove
void set_root_path(const NodePath &p_path);
NodePath get_root_path() const;
void disconnect_peer(int p_id);
Error send_auth(int p_to, Vector<uint8_t> p_bytes);
Error complete_auth(int p_peer);
void set_auth_callback(Callable p_callback);
Callable get_auth_callback() const;
void set_auth_timeout(double p_timeout);
double get_auth_timeout() const;
Vector<int> get_authenticating_peer_ids();
Error send_command(int p_to, const uint8_t *p_packet, int p_packet_len); // Used internally to relay packets when needed.
Error send_bytes(Vector<uint8_t> p_data, int p_to = MultiplayerPeer::TARGET_PEER_BROADCAST, MultiplayerPeer::TransferMode p_mode = MultiplayerPeer::TRANSFER_MODE_RELIABLE, int p_channel = 0);
String get_rpc_md5(const Object *p_obj);
const HashSet<int> get_connected_peers() const { return connected_peers; }
void set_remote_sender_override(int p_id) { remote_sender_override = p_id; }
void set_refuse_new_connections(bool p_refuse);
bool is_refusing_new_connections() const;
void set_allow_object_decoding(bool p_enable);
bool is_object_decoding_allowed() const;
void set_server_relay_enabled(bool p_enabled);
bool is_server_relay_enabled() const;
void set_max_sync_packet_size(int p_size);
int get_max_sync_packet_size() const;
void set_max_delta_packet_size(int p_size);
int get_max_delta_packet_size() const;
SceneMultiplayer();
~SceneMultiplayer();
};
#endif // SCENE_MULTIPLAYER_H
/**************************************************************************/
/* multiplayer_synchronizer.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifndef MULTIPLAYER_SYNCHRONIZER_H
#define MULTIPLAYER_SYNCHRONIZER_H
#include "scene_replication_config.h"
#include "scene/main/node.h"
class MultiplayerSynchronizer : public Node {
GDCLASS(MultiplayerSynchronizer, Node);
public:
enum VisibilityUpdateMode {
VISIBILITY_PROCESS_IDLE,
VISIBILITY_PROCESS_PHYSICS,
VISIBILITY_PROCESS_NONE,
};
private:
struct Watcher {
NodePath prop;
uint64_t last_change_usec = 0;
Variant value;
};
Ref<SceneReplicationConfig> replication_config;
NodePath root_path = NodePath(".."); // Start with parent, like with AnimationPlayer.
uint64_t sync_interval_usec = 0;
uint64_t delta_interval_usec = 0;
VisibilityUpdateMode visibility_update_mode = VISIBILITY_PROCESS_IDLE;
HashSet<Callable> visibility_filters;
HashSet<int> peer_visibility;
Vector<Watcher> watchers;
uint64_t last_watch_usec = 0;
ObjectID root_node_cache;
uint64_t last_sync_usec = 0;
uint16_t last_inbound_sync = 0;
uint32_t net_id = 0;
bool sync_started = false;
static Object *_get_prop_target(Object *p_obj, const NodePath &p_prop);
void _start();
void _stop();
void _update_process();
Error _watch_changes(uint64_t p_usec);
protected:
static void _bind_methods();
void _notification(int p_what);
public:
static Error get_state(const List<NodePath> &p_properties, Object *p_obj, Vector<Variant> &r_variant, Vector<const Variant *> &r_variant_ptrs);
static Error set_state(const List<NodePath> &p_properties, Object *p_obj, const Vector<Variant> &p_state);
void reset();
Node *get_root_node();
uint32_t get_net_id() const;
void set_net_id(uint32_t p_net_id);
bool update_outbound_sync_time(uint64_t p_usec);
bool update_inbound_sync_time(uint16_t p_network_time);
PackedStringArray get_configuration_warnings() const override;
void set_replication_interval(double p_interval);
double get_replication_interval() const;
void set_delta_interval(double p_interval);
double get_delta_interval() const;
void set_replication_config(Ref<SceneReplicationConfig> p_config);
Ref<SceneReplicationConfig> get_replication_config();
void set_root_path(const NodePath &p_path);
NodePath get_root_path() const;
virtual void set_multiplayer_authority(int p_peer_id, bool p_recursive = true) override;
bool is_visibility_public() const;
void set_visibility_public(bool p_public);
bool is_visible_to(int p_peer);
void set_visibility_for(int p_peer, bool p_visible);
bool get_visibility_for(int p_peer) const;
void update_visibility(int p_for_peer);
void set_visibility_update_mode(VisibilityUpdateMode p_mode);
void add_visibility_filter(Callable p_callback);
void remove_visibility_filter(Callable p_callback);
VisibilityUpdateMode get_visibility_update_mode() const;
List<Variant> get_delta_state(uint64_t p_cur_usec, uint64_t p_last_usec, uint64_t &r_indexes);
List<NodePath> get_delta_properties(uint64_t p_indexes);
SceneReplicationConfig *get_replication_config_ptr() const;
MultiplayerSynchronizer();
};
VARIANT_ENUM_CAST(MultiplayerSynchronizer::VisibilityUpdateMode);
#endif // MULTIPLAYER_SYNCHRONIZER_H
/**************************************************************************/
/* scene_replication_interface.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifndef SCENE_REPLICATION_INTERFACE_H
#define SCENE_REPLICATION_INTERFACE_H
#include "multiplayer_spawner.h"
#include "multiplayer_synchronizer.h"
#include "core/object/ref_counted.h"
class SceneMultiplayer;
class SceneCacheInterface;
class SceneReplicationInterface : public RefCounted {
GDCLASS(SceneReplicationInterface, RefCounted);
private:
struct TrackedNode {
ObjectID id;
uint32_t net_id = 0;
uint32_t remote_peer = 0;
ObjectID spawner;
HashSet<ObjectID> synchronizers;
bool operator==(const ObjectID &p_other) { return id == p_other; }
TrackedNode() {}
TrackedNode(const ObjectID &p_id) { id = p_id; }
TrackedNode(const ObjectID &p_id, uint32_t p_net_id) {
id = p_id;
net_id = p_net_id;
}
};
struct PeerInfo {
HashSet<ObjectID> sync_nodes;
HashSet<ObjectID> spawn_nodes;
HashMap<ObjectID, uint64_t> last_watch_usecs;
HashMap<uint32_t, ObjectID> recv_sync_ids;
HashMap<uint32_t, ObjectID> recv_nodes;
uint16_t last_sent_sync = 0;
};
// Replication state.
HashMap<int, PeerInfo> peers_info;
uint32_t last_net_id = 0;
HashMap<ObjectID, TrackedNode> tracked_nodes;
HashSet<ObjectID> spawned_nodes;
HashSet<ObjectID> sync_nodes;
// Pending local spawn information (handles spawning nested nodes during ready).
HashSet<ObjectID> spawn_queue;
// Pending remote spawn information.
ObjectID pending_spawn;
int pending_spawn_remote = 0;
const uint8_t *pending_buffer = nullptr;
int pending_buffer_size = 0;
List<uint32_t> pending_sync_net_ids;
// Replicator config.
SceneMultiplayer *multiplayer = nullptr;
SceneCacheInterface *multiplayer_cache = nullptr;
PackedByteArray packet_cache;
int sync_mtu = 1350; // Highly dependent on underlying protocol.
int delta_mtu = 65535;
TrackedNode &_track(const ObjectID &p_id);
void _untrack(const ObjectID &p_id);
void _node_ready(const ObjectID &p_oid);
bool _has_authority(const Node *p_node);
bool _verify_synchronizer(int p_peer, MultiplayerSynchronizer *p_sync, uint32_t &r_net_id);
MultiplayerSynchronizer *_find_synchronizer(int p_peer, uint32_t p_net_id);
void _send_sync(int p_peer, const HashSet<ObjectID> &p_synchronizers, uint16_t p_sync_net_time, uint64_t p_usec);
void _send_delta(int p_peer, const HashSet<ObjectID> &p_synchronizers, uint64_t p_usec, const HashMap<ObjectID, uint64_t> &p_last_watch_usecs);
Error _make_spawn_packet(Node *p_node, MultiplayerSpawner *p_spawner, int &r_len);
Error _make_despawn_packet(Node *p_node, int &r_len);
Error _send_raw(const uint8_t *p_buffer, int p_size, int p_peer, bool p_reliable);
void _visibility_changed(int p_peer, ObjectID p_oid);
Error _update_sync_visibility(int p_peer, MultiplayerSynchronizer *p_sync);
Error _update_spawn_visibility(int p_peer, const ObjectID &p_oid);
void _free_remotes(const PeerInfo &p_info);
template <typename T>
static T *get_id_as(const ObjectID &p_id) {
return p_id.is_valid() ? Object::cast_to<T>(ObjectDB::get_instance(p_id)) : nullptr;
}
#ifdef DEBUG_ENABLED
_FORCE_INLINE_ void _profile_node_data(const String &p_what, ObjectID p_id, int p_size);
#endif
public:
static void make_default();
void on_reset();
void on_peer_change(int p_id, bool p_connected);
Error on_spawn(Object *p_obj, Variant p_config);
Error on_despawn(Object *p_obj, Variant p_config);
Error on_replication_start(Object *p_obj, Variant p_config);
Error on_replication_stop(Object *p_obj, Variant p_config);
void on_network_process();
Error on_spawn_receive(int p_from, const uint8_t *p_buffer, int p_buffer_len);
Error on_despawn_receive(int p_from, const uint8_t *p_buffer, int p_buffer_len);
Error on_sync_receive(int p_from, const uint8_t *p_buffer, int p_buffer_len);
Error on_delta_receive(int p_from, const uint8_t *p_buffer, int p_buffer_len);
bool is_rpc_visible(const ObjectID &p_oid, int p_peer) const;
void set_max_sync_packet_size(int p_size);
int get_max_sync_packet_size() const;
void set_max_delta_packet_size(int p_size);
int get_max_delta_packet_size() const;
SceneReplicationInterface(SceneMultiplayer *p_multiplayer, SceneCacheInterface *p_cache) {
multiplayer = p_multiplayer;
multiplayer_cache = p_cache;
}
};
#endif // SCENE_REPLICATION_INTERFACE_H
<?xml version="1.0" encoding="UTF-8" ?>
<class name="MultiplayerSynchronizer" inherits="Node" keywords="network" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../../doc/class.xsd">
<brief_description>
Synchronizes properties from the multiplayer authority to the remote peers.
</brief_description>
<description>
By default, [MultiplayerSynchronizer] synchronizes configured properties to all peers.
Visibility can be handled directly with [method set_visibility_for] or as-needed with [method add_visibility_filter] and [method update_visibility].
[MultiplayerSpawner]s will handle nodes according to the visibility of their synchronizers, as long as the node at [member root_path] was spawned by one.
Internally, [MultiplayerSynchronizer] uses [method MultiplayerAPI.object_configuration_add] to notify synchronization start passing the [Node] at [member root_path] as the [code]object[/code] and itself as the [code]configuration[/code], and uses [method MultiplayerAPI.object_configuration_remove] to notify synchronization end in a similar way.
[b]Note:[/b] Synchronization is not supported for [Object] type properties, like [Resource]. Properties that are unique to each peer, like the instance IDs of [Object]s (see [method Object.get_instance_id]) or [RID]s, will also not work in synchronization.
</description>
<tutorials>
</tutorials>
<methods>
<method name="add_visibility_filter">
<return type="void" />
<param index="0" name="filter" type="Callable" />
<description>
Adds a peer visibility filter for this synchronizer.
[param filter] should take a peer ID [int] and return a [bool].
</description>
</method>
<method name="get_visibility_for" qualifiers="const">
<return type="bool" />
<param index="0" name="peer" type="int" />
<description>
Queries the current visibility for peer [param peer].
</description>
</method>
<method name="remove_visibility_filter">
<return type="void" />
<param index="0" name="filter" type="Callable" />
<description>
Removes a peer visibility filter from this synchronizer.
</description>
</method>
<method name="set_visibility_for">
<return type="void" />
<param index="0" name="peer" type="int" />
<param index="1" name="visible" type="bool" />
<description>
Sets the visibility of [param peer] to [param visible]. If [param peer] is [code]0[/code], the value of [member public_visibility] will be updated instead.
</description>
</method>
<method name="update_visibility">
<return type="void" />
<param index="0" name="for_peer" type="int" default="0" />
<description>
Updates the visibility of [param for_peer] according to visibility filters. If [param for_peer] is [code]0[/code] (the default), all peers' visibilities are updated.
</description>
</method>
</methods>
<members>
<member name="delta_interval" type="float" setter="set_delta_interval" getter="get_delta_interval" default="0.0">
Time interval between delta synchronizations. When set to [code]0.0[/code] (the default), delta synchronizations happen every network process frame.
</member>
<member name="public_visibility" type="bool" setter="set_visibility_public" getter="is_visibility_public" default="true">
Whether synchronization should be visible to all peers by default. See [method set_visibility_for] and [method add_visibility_filter] for ways of configuring fine-grained visibility options.
</member>
<member name="replication_config" type="SceneReplicationConfig" setter="set_replication_config" getter="get_replication_config">
Resource containing which properties to synchronize.
</member>
<member name="replication_interval" type="float" setter="set_replication_interval" getter="get_replication_interval" default="0.0">
Time interval between synchronizations. When set to [code]0.0[/code] (the default), synchronizations happen every network process frame.
</member>
<member name="root_path" type="NodePath" setter="set_root_path" getter="get_root_path" default="NodePath("..")">
Node path that replicated properties are relative to.
If [member root_path] was spawned by a [MultiplayerSpawner], the node will also be spawned and despawned based on this synchronizer's visibility options.
</member>
<member name="visibility_update_mode" type="int" setter="set_visibility_update_mode" getter="get_visibility_update_mode" enum="MultiplayerSynchronizer.VisibilityUpdateMode" default="0">
Specifies when visibility filters are updated (see [enum VisibilityUpdateMode] for options).
</member>
</members>
<signals>
<signal name="delta_synchronized">
<description>
Emitted when a new delta synchronization state is received by this synchronizer after the properties have been updated.
</description>
</signal>
<signal name="synchronized">
<description>
Emitted when a new synchronization state is received by this synchronizer after the properties have been updated.
</description>
</signal>
<signal name="visibility_changed">
<param index="0" name="for_peer" type="int" />
<description>
Emitted when visibility of [param for_peer] is updated. See [method update_visibility].
</description>
</signal>
</signals>
<constants>
<constant name="VISIBILITY_PROCESS_IDLE" value="0" enum="VisibilityUpdateMode">
Visibility filters are updated during process frames (see [constant Node.NOTIFICATION_INTERNAL_PROCESS]).
</constant>
<constant name="VISIBILITY_PROCESS_PHYSICS" value="1" enum="VisibilityUpdateMode">
Visibility filters are updated during physics frames (see [constant Node.NOTIFICATION_INTERNAL_PHYSICS_PROCESS]).
</constant>
<constant name="VISIBILITY_PROCESS_NONE" value="2" enum="VisibilityUpdateMode">
Visibility filters are not updated automatically, and must be updated manually by calling [method update_visibility].
</constant>
</constants>
</class>
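The visibility API documented above can also be driven through the C++ declarations in multiplayer_synchronizer.h. A minimal sketch, assuming a hypothetical module-side node class MyGame (callable_mp is Godot's C++ method-pointer helper):

// Hypothetical filter: decide whether p_peer may see the synchronized node.
bool MyGame::_peer_filter(int p_peer) {
	return p_peer == 1; // E.g. only the server peer receives updates.
}

void MyGame::_setup_visibility(MultiplayerSynchronizer *p_sync) {
	p_sync->set_visibility_public(false); // Opt out of broadcasting to everyone.
	p_sync->add_visibility_filter(callable_mp(this, &MyGame::_peer_filter));
	p_sync->set_visibility_update_mode(MultiplayerSynchronizer::VISIBILITY_PROCESS_NONE);
	p_sync->update_visibility(0); // With PROCESS_NONE, re-evaluate all peers manually.
}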
<?xml version="1.0" encoding="UTF-8" ?>
<class name="MultiplayerSpawner" inherits="Node" keywords="network" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../../doc/class.xsd">
<brief_description>
Automatically replicates spawnable nodes from the authority to other multiplayer peers.
</brief_description>
<description>
Spawnable scenes can be configured in the editor or through code (see [method add_spawnable_scene]).
Also supports custom node spawns through [method spawn], calling [member spawn_function] on all peers.
Internally, [MultiplayerSpawner] uses [method MultiplayerAPI.object_configuration_add] to notify spawns passing the spawned node as the [code]object[/code] and itself as the [code]configuration[/code], and [method MultiplayerAPI.object_configuration_remove] to notify despawns in a similar way.
</description>
<tutorials>
</tutorials>
<methods>
<method name="add_spawnable_scene">
<return type="void" />
<param index="0" name="path" type="String" />
<description>
Adds a scene path to spawnable scenes, making it automatically replicated from the multiplayer authority to other peers when instances are added as children of the node pointed to by [member spawn_path].
</description>
</method>
<method name="clear_spawnable_scenes">
<return type="void" />
<description>
Clears all spawnable scenes. Does not despawn existing instances on remote peers.
</description>
</method>
<method name="get_spawnable_scene" qualifiers="const">
<return type="String" />
<param index="0" name="index" type="int" />
<description>
Returns the spawnable scene path by index.
</description>
</method>
<method name="get_spawnable_scene_count" qualifiers="const">
<return type="int" />
<description>
Returns the count of spawnable scene paths.
</description>
</method>
<method name="spawn">
<return type="Node" />
<param index="0" name="data" type="Variant" default="null" />
<description>
Requests a custom spawn, with [param data] passed to [member spawn_function] on all peers. Returns the locally spawned node instance already inside the scene tree, and added as a child of the node pointed to by [member spawn_path].
[b]Note:[/b] Spawnable scenes are spawned automatically. [method spawn] is only needed for custom spawns.
</description>
</method>
</methods>
<members>
<member name="spawn_function" type="Callable" setter="set_spawn_function" getter="get_spawn_function">
Method called on all peers when a custom [method spawn] is requested by the authority. Will receive the [code]data[/code] parameter, and should return a [Node] that is not in the scene tree.
[b]Note:[/b] The returned node should [b]not[/b] be added to the scene with [method Node.add_child]. This is done automatically.
</member>
<member name="spawn_limit" type="int" setter="set_spawn_limit" getter="get_spawn_limit" default="0">
Maximum number of nodes allowed to be spawned by this spawner. Includes both spawnable scenes and custom spawns.
When set to [code]0[/code] (the default), there is no limit.
</member>
<member name="spawn_path" type="NodePath" setter="set_spawn_path" getter="get_spawn_path" default="NodePath("")">
Path to the spawn root. Spawnable scenes that are added as direct children are replicated to other peers.
</member>
</members>
<signals>
<signal name="despawned">
<param index="0" name="node" type="Node" />
<description>
Emitted when a spawnable scene or custom spawn was despawned by the multiplayer authority. Only called on remote peers.
</description>
</signal>
<signal name="spawned">
<param index="0" name="node" type="Node" />
<description>
Emitted when a spawnable scene or custom spawn was spawned by the multiplayer authority. Only called on remote peers.
</description>
</signal>
</signals>
</class>
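A similar sketch for the custom spawn flow, with C++ calls inferred from the setters and methods documented above (MyGame and _make_bullet are hypothetical, and the [code]data[/code] payload is assumed to carry a position):

// Hypothetical spawn function: build the node from p_data. The spawner adds
// the returned node to the tree itself, so it must not be added here.
Node *MyGame::_make_bullet(const Variant &p_data) {
	Node2D *bullet = memnew(Node2D);
	bullet->set_position(p_data); // Variant implicitly converts to Vector2 here.
	return bullet;
}

void MyGame::_fire(MultiplayerSpawner *p_spawner, const Vector2 &p_pos) {
	p_spawner->set_spawn_function(callable_mp(this, &MyGame::_make_bullet));
	Node *bullet = p_spawner->spawn(p_pos); // Runs _make_bullet on all peers.
}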
<?xml version="1.0" encoding="UTF-8" ?>
<class name="SceneMultiplayer" inherits="MultiplayerAPI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../../doc/class.xsd">
<brief_description>
High-level multiplayer API implementation.
</brief_description>
<description>
This class is the default implementation of [MultiplayerAPI], used to provide multiplayer functionalities in Godot Engine.
This implementation supports RPCs via [method Node.rpc] and [method Node.rpc_id] and requires [method MultiplayerAPI.rpc] to be passed a [Node] (it will fail for other object types).
This implementation additionally provides [SceneTree] replication via the [MultiplayerSpawner] and [MultiplayerSynchronizer] nodes, and the [SceneReplicationConfig] resource.
[b]Note:[/b] The high-level multiplayer API protocol is an implementation detail and isn't meant to be used by non-Godot servers. It may change without notice.
[b]Note:[/b] When exporting to Android, make sure to enable the [code]INTERNET[/code] permission in the Android export preset before exporting the project or using one-click deploy. Otherwise, network communication of any kind will be blocked by Android.
</description>
<tutorials>
</tutorials>
<methods>
<method name="clear">
<return type="void" />
<description>
Clears the current SceneMultiplayer network state (you shouldn't call this unless you know what you are doing).
</description>
</method>
<method name="complete_auth">
<return type="int" enum="Error" />
<param index="0" name="id" type="int" />
<description>
Marks the authentication step as completed for the remote peer identified by [param id]. The [signal MultiplayerAPI.peer_connected] signal will be emitted for this peer once the remote side also completes the authentication. No further authentication messages are expected to be received from this peer.
If a peer disconnects before completing authentication, either due to a network issue, the [member auth_timeout] expiring, or manually calling [method disconnect_peer], the [signal peer_authentication_failed] signal will be emitted instead of [signal MultiplayerAPI.peer_disconnected].
</description>
</method>
<method name="disconnect_peer">
<return type="void" />
<param index="0" name="id" type="int" />
<description>
Disconnects the peer identified by [param id], removing it from the list of connected peers, and closing the underlying connection with it.
</description>
</method>
<method name="get_authenticating_peers">
<return type="PackedInt32Array" />
<description>
Returns the IDs of the peers currently trying to authenticate with this [MultiplayerAPI].
</description>
</method>
<method name="send_auth">
<return type="int" enum="Error" />
<param index="0" name="id" type="int" />
<param index="1" name="data" type="PackedByteArray" />
<description>
Sends the specified [param data] to the remote peer identified by [param id] as part of an authentication message. This can be used to authenticate peers, and control when [signal MultiplayerAPI.peer_connected] is emitted (and the remote peer accepted as one of the connected peers).
</description>
</method>
<method name="send_bytes">
<return type="int" enum="Error" />
<param index="0" name="bytes" type="PackedByteArray" />
<param index="1" name="id" type="int" default="0" />
<param index="2" name="mode" type="int" enum="MultiplayerPeer.TransferMode" default="2" />
<param index="3" name="channel" type="int" default="0" />
<description>
Sends the given raw [param bytes] to a specific peer identified by [param id] (see [method MultiplayerPeer.set_target_peer]). Default ID is [code]0[/code], i.e. broadcast to all peers.
</description>
</method>
</methods>
<members>
<member name="allow_object_decoding" type="bool" setter="set_allow_object_decoding" getter="is_object_decoding_allowed" default="false">
If [code]true[/code], the MultiplayerAPI will allow encoding and decoding of objects during RPCs.
[b]Warning:[/b] Deserialized objects can contain code which gets executed. Do not use this option if the serialized object comes from untrusted sources, to avoid potential security threats such as remote code execution.
</member>
<member name="auth_callback" type="Callable" setter="set_auth_callback" getter="get_auth_callback" default="Callable()">
The callback to execute when receiving authentication data sent via [method send_auth]. If the [Callable] is empty (default), peers will be automatically accepted as soon as they connect.
</member>
<member name="auth_timeout" type="float" setter="set_auth_timeout" getter="get_auth_timeout" default="3.0">
If set to a value greater than [code]0.0[/code], the maximum duration in seconds peers can stay in the authenticating state, after which the authentication will automatically fail. See the [signal peer_authenticating] and [signal peer_authentication_failed] signals.
</member>
<member name="max_delta_packet_size" type="int" setter="set_max_delta_packet_size" getter="get_max_delta_packet_size" default="65535">
Maximum size of each delta packet. Higher values increase the chance of receiving full updates in a single frame, but also the chance of causing networking congestion (higher latency, disconnections). See [MultiplayerSynchronizer].
</member>
<member name="max_sync_packet_size" type="int" setter="set_max_sync_packet_size" getter="get_max_sync_packet_size" default="1350">
Maximum size of each synchronization packet. Higher values increase the chance of receiving full updates in a single frame, but also the chance of packet loss. See [MultiplayerSynchronizer].
</member>
<member name="refuse_new_connections" type="bool" setter="set_refuse_new_connections" getter="is_refusing_new_connections" default="false">
If [code]true[/code], the MultiplayerAPI's [member MultiplayerAPI.multiplayer_peer] refuses new incoming connections.
</member>
<member name="root_path" type="NodePath" setter="set_root_path" getter="get_root_path" default="NodePath("")">
The root path to use for RPCs and replication. Instead of an absolute path, a relative path will be used to find the node upon which the RPC should be executed.
This effectively allows different branches of the scene tree to be managed by different MultiplayerAPI instances, for example to run both client and server in the same scene.
</member>
<member name="server_relay" type="bool" setter="set_server_relay_enabled" getter="is_server_relay_enabled" default="true">
Enable or disable the server feature that notifies clients of other peers' connection/disconnection, and relays messages between them. When this option is [code]false[/code], clients won't be automatically notified of other peers and won't be able to send them packets through the server.
[b]Note:[/b] Changing this option while other peers are connected may lead to unexpected behaviors.
[b]Note:[/b] Support for this feature may depend on the current [MultiplayerPeer] configuration. See [method MultiplayerPeer.is_server_relay_supported].
</member>
</members>
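A sketch of the authentication flow these members describe, wired through the SceneMultiplayer declarations in scene_multiplayer.h above (MyGame, its members, and the token helpers are hypothetical):

void MyGame::_setup_auth(Ref<SceneMultiplayer> p_mp) {
	scene_multiplayer = p_mp; // Hypothetical member holding the API.
	p_mp->set_auth_callback(callable_mp(this, &MyGame::_on_auth_received));
	p_mp->set_auth_timeout(5.0); // Fail peers that stall for 5 seconds.
}

// Receives the data a remote peer passed to send_auth().
void MyGame::_on_auth_received(int p_peer, const PackedByteArray &p_data) {
	if (_is_valid_token(p_data)) { // Hypothetical validation.
		scene_multiplayer->send_auth(p_peer, _local_token()); // Authenticate back.
		scene_multiplayer->complete_auth(p_peer); // peer_connected is emitted once both sides complete.
	} else {
		scene_multiplayer->disconnect_peer(p_peer);
	}
}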
void canvas_item_set_clip(RID p_item, bool p_clip);
void canvas_item_set_distance_field_mode(RID p_item, bool p_enable);
void canvas_item_set_custom_rect(RID p_item, bool p_custom_rect, const Rect2 &p_rect = Rect2());
void canvas_item_set_modulate(RID p_item, const Color &p_color);
void canvas_item_set_self_modulate(RID p_item, const Color &p_color);
void canvas_item_set_draw_behind_parent(RID p_item, bool p_enable);
void canvas_item_set_update_when_visible(RID p_item, bool p_update);
void canvas_item_add_line(RID p_item, const Point2 &p_from, const Point2 &p_to, const Color &p_color, float p_width = -1.0, bool p_antialiased = false);
void canvas_item_add_polyline(RID p_item, const Vector<Point2> &p_points, const Vector<Color> &p_colors, float p_width = -1.0, bool p_antialiased = false);
void canvas_item_add_multiline(RID p_item, const Vector<Point2> &p_points, const Vector<Color> &p_colors, float p_width = -1.0, bool p_antialiased = false);
void canvas_item_add_rect(RID p_item, const Rect2 &p_rect, const Color &p_color, bool p_antialiased);
void canvas_item_add_circle(RID p_item, const Point2 &p_pos, float p_radius, const Color &p_color, bool p_antialiased);
void canvas_item_add_texture_rect(RID p_item, const Rect2 &p_rect, RID p_texture, bool p_tile = false, const Color &p_modulate = Color(1, 1, 1), bool p_transpose = false);
void canvas_item_add_texture_rect_region(RID p_item, const Rect2 &p_rect, RID p_texture, const Rect2 &p_src_rect, const Color &p_modulate = Color(1, 1, 1), bool p_transpose = false, bool p_clip_uv = false);
void canvas_item_add_msdf_texture_rect_region(RID p_item, const Rect2 &p_rect, RID p_texture, const Rect2 &p_src_rect, const Color &p_modulate = Color(1, 1, 1), int p_outline_size = 0, float p_px_range = 1.0, float p_scale = 1.0);
void canvas_item_add_lcd_texture_rect_region(RID p_item, const Rect2 &p_rect, RID p_texture, const Rect2 &p_src_rect, const Color &p_modulate = Color(1, 1, 1));
void canvas_item_add_nine_patch(RID p_item, const Rect2 &p_rect, const Rect2 &p_source, RID p_texture, const Vector2 &p_topleft, const Vector2 &p_bottomright, RS::NinePatchAxisMode p_x_axis_mode = RS::NINE_PATCH_STRETCH, RS::NinePatchAxisMode p_y_axis_mode = RS::NINE_PATCH_STRETCH, bool p_draw_center = true, const Color &p_modulate = Color(1, 1, 1));
void canvas_item_add_primitive(RID p_item, const Vector<Point2> &p_points, const Vector<Color> &p_colors, const Vector<Point2> &p_uvs, RID p_texture);
void canvas_item_add_polygon(RID p_item, const Vector<Point2> &p_points, const Vector<Color> &p_colors, const Vector<Point2> &p_uvs = Vector<Point2>(), RID p_texture = RID());
void canvas_item_add_triangle_array(RID p_item, const Vector<int> &p_indices, const Vector<Point2> &p_points, const Vector<Color> &p_colors, const Vector<Point2> &p_uvs = Vector<Point2>(), const Vector<int> &p_bones = Vector<int>(), const Vector<float> &p_weights = Vector<float>(), RID p_texture = RID(), int p_count = -1);
void canvas_item_add_mesh(RID p_item, const RID &p_mesh, const Transform2D &p_transform = Transform2D(), const Color &p_modulate = Color(1, 1, 1), RID p_texture = RID());
void canvas_item_add_multimesh(RID p_item, RID p_mesh, RID p_texture = RID());
void canvas_item_add_particles(RID p_item, RID p_particles, RID p_texture);
void canvas_item_add_set_transform(RID p_item, const Transform2D &p_transform);
void canvas_item_add_clip_ignore(RID p_item, bool p_ignore);
void canvas_item_add_animation_slice(RID p_item, double p_animation_length, double p_slice_begin, double p_slice_end, double p_offset);
void canvas_item_set_sort_children_by_y(RID p_item, bool p_enable);
void canvas_item_set_z_index(RID p_item, int p_z);
void canvas_item_set_z_as_relative_to_parent(RID p_item, bool p_enable);
void canvas_item_set_copy_to_backbuffer(RID p_item, bool p_enable, const Rect2 &p_rect);
void canvas_item_attach_skeleton(RID p_item, RID p_skeleton);
void canvas_item_clear(RID p_item);
void canvas_item_set_draw_index(RID p_item, int p_index);
void canvas_item_set_material(RID p_item, RID p_material);
void canvas_item_set_use_parent_material(RID p_item, bool p_enable);
void canvas_item_set_visibility_notifier(RID p_item, bool p_enable, const Rect2 &p_area, const Callable &p_enter_callable, const Callable &p_exit_callable);
void canvas_item_set_canvas_group_mode(RID p_item, RS::CanvasGroupMode p_mode, float p_clear_margin = 5.0, bool p_fit_empty = false, float p_fit_margin = 0.0, bool p_blur_mipmaps = false);
void canvas_item_set_debug_redraw(bool p_enabled);
inline static void XrQuaternionf_Multiply(XrQuaternionf* result, const XrQuaternionf* a, const XrQuaternionf* b) {
result->x = (b->w * a->x) + (b->x * a->w) + (b->y * a->z) - (b->z * a->y);
result->y = (b->w * a->y) - (b->x * a->z) + (b->y * a->w) + (b->z * a->x);
result->z = (b->w * a->z) + (b->x * a->y) - (b->y * a->x) + (b->z * a->w);
result->w = (b->w * a->w) - (b->x * a->x) - (b->y * a->y) - (b->z * a->z);
}
inline static void XrQuaternionf_Invert(XrQuaternionf* result, const XrQuaternionf* q) {
result->x = -q->x;
result->y = -q->y;
result->z = -q->z;
result->w = q->w;
}
inline static void XrQuaternionf_Normalize(XrQuaternionf* q) {
const float lengthRcp = XrRcpSqrt(q->x * q->x + q->y * q->y + q->z * q->z + q->w * q->w);
q->x *= lengthRcp;
q->y *= lengthRcp;
q->z *= lengthRcp;
q->w *= lengthRcp;
}
inline static void XrQuaternionf_RotateVector3f(XrVector3f* result, const XrQuaternionf* a, const XrVector3f* v) {
XrQuaternionf q = {v->x, v->y, v->z, 0.0f};
XrQuaternionf aq;
XrQuaternionf_Multiply(&aq, &q, a);
XrQuaternionf aInv;
XrQuaternionf_Invert(&aInv, a);
XrQuaternionf aqaInv;
XrQuaternionf_Multiply(&aqaInv, &aInv, &aq);
result->x = aqaInv.x;
result->y = aqaInv.y;
result->z = aqaInv.z;
}
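// Note: since XrQuaternionf_Multiply(r, a, b) stores b * a, the two calls
// above evaluate a * (v, 0) * a^-1, i.e. the usual quaternion sandwich
// product rotating v by `a` (Invert returns the conjugate, which equals the
// inverse only for unit quaternions).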
inline static void XrPosef_CreateIdentity(XrPosef* result) {
XrQuaternionf_CreateIdentity(&result->orientation);
XrVector3f_Set(&result->position, 0);
}
inline static void XrPosef_TransformVector3f(XrVector3f* result, const XrPosef* a, const XrVector3f* v) {
XrVector3f r0;
XrQuaternionf_RotateVector3f(&r0, &a->orientation, v);
XrVector3f_Add(result, &r0, &a->position);
}
inline static void XrPosef_Multiply(XrPosef* result, const XrPosef* a, const XrPosef* b) {
XrQuaternionf_Multiply(&result->orientation, &b->orientation, &a->orientation);
XrPosef_TransformVector3f(&result->position, a, &b->position);
}
inline static void XrPosef_Invert(XrPosef* result, const XrPosef* a) {
XrQuaternionf_Invert(&result->orientation, &a->orientation);
XrVector3f aPosNeg;
XrVector3f_Scale(&aPosNeg, &a->position, -1.0f);
XrQuaternionf_RotateVector3f(&result->position, &result->orientation, &aPosNeg);
}
// Use left-multiplication to accumulate transformations.
inline static void XrMatrix4x4f_Multiply(XrMatrix4x4f* result, const XrMatrix4x4f* a, const XrMatrix4x4f* b) {
result->m[0] = a->m[0] * b->m[0] + a->m[4] * b->m[1] + a->m[8] * b->m[2] + a->m[12] * b->m[3];
result->m[1] = a->m[1] * b->m[0] + a->m[5] * b->m[1] + a->m[9] * b->m[2] + a->m[13] * b->m[3];
result->m[2] = a->m[2] * b->m[0] + a->m[6] * b->m[1] + a->m[10] * b->m[2] + a->m[14] * b->m[3];
result->m[3] = a->m[3] * b->m[0] + a->m[7] * b->m[1] + a->m[11] * b->m[2] + a->m[15] * b->m[3];
result->m[4] = a->m[0] * b->m[4] + a->m[4] * b->m[5] + a->m[8] * b->m[6] + a->m[12] * b->m[7];
result->m[5] = a->m[1] * b->m[4] + a->m[5] * b->m[5] + a->m[9] * b->m[6] + a->m[13] * b->m[7];
result->m[6] = a->m[2] * b->m[4] + a->m[6] * b->m[5] + a->m[10] * b->m[6] + a->m[14] * b->m[7];
result->m[7] = a->m[3] * b->m[4] + a->m[7] * b->m[5] + a->m[11] * b->m[6] + a->m[15] * b->m[7];
result->m[8] = a->m[0] * b->m[8] + a->m[4] * b->m[9] + a->m[8] * b->m[10] + a->m[12] * b->m[11];
result->m[9] = a->m[1] * b->m[8] + a->m[5] * b->m[9] + a->m[9] * b->m[10] + a->m[13] * b->m[11];
result->m[10] = a->m[2] * b->m[8] + a->m[6] * b->m[9] + a->m[10] * b->m[10] + a->m[14] * b->m[11];
result->m[11] = a->m[3] * b->m[8] + a->m[7] * b->m[9] + a->m[11] * b->m[10] + a->m[15] * b->m[11];
result->m[12] = a->m[0] * b->m[12] + a->m[4] * b->m[13] + a->m[8] * b->m[14] + a->m[12] * b->m[15];
result->m[13] = a->m[1] * b->m[12] + a->m[5] * b->m[13] + a->m[9] * b->m[14] + a->m[13] * b->m[15];
result->m[14] = a->m[2] * b->m[12] + a->m[6] * b->m[13] + a->m[10] * b->m[14] + a->m[14] * b->m[15];
result->m[15] = a->m[3] * b->m[12] + a->m[7] * b->m[13] + a->m[11] * b->m[14] + a->m[15] * b->m[15];
}
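// Note: matrices here are column-major (m[0..3] is the first column), so
// result = a * b applies `b` to a vector first; e.g.
//   XrMatrix4x4f_Multiply(&mvp, &proj, &view); // mvp = proj * view.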
// Creates the transpose of the given matrix.
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "vec3.h"
#include "vec4.h"
#include "transcendental.h"
namespace embree
{
////////////////////////////////////////////////////////////////
// Quaternion Struct
////////////////////////////////////////////////////////////////
template<typename T>
struct QuaternionT
{
typedef Vec3<T> Vector;
////////////////////////////////////////////////////////////////////////////////
/// Construction
////////////////////////////////////////////////////////////////////////////////
__forceinline QuaternionT () { }
__forceinline QuaternionT ( const QuaternionT& other ) { r = other.r; i = other.i; j = other.j; k = other.k; }
__forceinline QuaternionT& operator=( const QuaternionT& other ) { r = other.r; i = other.i; j = other.j; k = other.k; return *this; }
__forceinline QuaternionT( const T& r ) : r(r), i(zero), j(zero), k(zero) {}
__forceinline explicit QuaternionT( const Vec3<T>& v ) : r(zero), i(v.x), j(v.y), k(v.z) {}
__forceinline explicit QuaternionT( const Vec4<T>& v ) : r(v.x), i(v.y), j(v.z), k(v.w) {}
__forceinline QuaternionT( const T& r, const T& i, const T& j, const T& k ) : r(r), i(i), j(j), k(k) {}
__forceinline QuaternionT( const T& r, const Vec3<T>& v ) : r(r), i(v.x), j(v.y), k(v.z) {}
__inline QuaternionT( const Vec3<T>& vx, const Vec3<T>& vy, const Vec3<T>& vz );
__inline QuaternionT( const T& yaw, const T& pitch, const T& roll );
////////////////////////////////////////////////////////////////////////////////
/// Constants
////////////////////////////////////////////////////////////////////////////////
__forceinline QuaternionT( ZeroTy ) : r(zero), i(zero), j(zero), k(zero) {}
__forceinline QuaternionT( OneTy ) : r( one), i(zero), j(zero), k(zero) {}
/*! return quaternion for rotation around arbitrary axis */
static __forceinline QuaternionT rotate(const Vec3<T>& u, const T& r) {
return QuaternionT<T>(cos(T(0.5)*r),sin(T(0.5)*r)*normalize(u));
}
/*! returns the rotation axis of the quaternion as a vector */
__forceinline Vec3<T> v( ) const { return Vec3<T>(i, j, k); }
public:
T r, i, j, k;
};
template<typename T> __forceinline QuaternionT<T> operator *( const T & a, const QuaternionT<T>& b ) { return QuaternionT<T>(a * b.r, a * b.i, a * b.j, a * b.k); }
template<typename T> __forceinline QuaternionT<T> operator *( const QuaternionT<T>& a, const T & b ) { return QuaternionT<T>(a.r * b, a.i * b, a.j * b, a.k * b); }
////////////////////////////////////////////////////////////////
// Unary Operators
////////////////////////////////////////////////////////////////
template<typename T> __forceinline QuaternionT<T> operator +( const QuaternionT<T>& a ) { return QuaternionT<T>(+a.r, +a.i, +a.j, +a.k); }
template<typename T> __forceinline QuaternionT<T> operator -( const QuaternionT<T>& a ) { return QuaternionT<T>(-a.r, -a.i, -a.j, -a.k); }
template<typename T> __forceinline QuaternionT<T> conj ( const QuaternionT<T>& a ) { return QuaternionT<T>(a.r, -a.i, -a.j, -a.k); }
template<typename T> __forceinline T abs ( const QuaternionT<T>& a ) { return sqrt(a.r*a.r + a.i*a.i + a.j*a.j + a.k*a.k); }
template<typename T> __forceinline QuaternionT<T> rcp ( const QuaternionT<T>& a ) { return conj(a)*rcp(a.r*a.r + a.i*a.i + a.j*a.j + a.k*a.k); }
template<typename T> __forceinline QuaternionT<T> normalize ( const QuaternionT<T>& a ) { return a*rsqrt(a.r*a.r + a.i*a.i + a.j*a.j + a.k*a.k); }
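// Note: rcp(a) above is the quaternion inverse, conj(a) / |a|^2, so
// a * rcp(a) == one for any nonzero quaternion.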
// evaluates a*q-p
template<typename T> __forceinline QuaternionT<T>
msub(const T& a, const QuaternionT<T>& q, const QuaternionT<T>& p)
{
return QuaternionT<T>(msub(a, q.r, p.r),
msub(a, q.i, p.i),
msub(a, q.j, p.j),
msub(a, q.k, p.k));
}
// evaluates a*q+p
template<typename T> __forceinline QuaternionT<T>
madd (const T& a, const QuaternionT<T>& q, const QuaternionT<T>& p)
{
return QuaternionT<T>(madd(a, q.r, p.r),
madd(a, q.i, p.i),
madd(a, q.j, p.j),
madd(a, q.k, p.k));
}
////////////////////////////////////////////////////////////////
// Binary Operators
////////////////////////////////////////////////////////////////
template<typename T> __forceinline QuaternionT<T> operator +( const T & a, const QuaternionT<T>& b ) { return QuaternionT<T>(a + b.r, b.i, b.j, b.k); }
template<typename T> __forceinline QuaternionT<T> operator +( const QuaternionT<T>& a, const T & b ) { return QuaternionT<T>(a.r + b, a.i, a.j, a.k); }
template<typename T> __forceinline QuaternionT<T> operator +( const QuaternionT<T>& a, const QuaternionT<T>& b ) { return QuaternionT<T>(a.r + b.r, a.i + b.i, a.j + b.j, a.k + b.k); }
template<typename T> __forceinline QuaternionT<T> operator -( const T & a, const QuaternionT<T>& b ) { return QuaternionT<T>(a - b.r, -b.i, -b.j, -b.k); }
template<typename T> __forceinline QuaternionT<T> operator -( const QuaternionT<T>& a, const T & b ) { return QuaternionT<T>(a.r - b, a.i, a.j, a.k); }
template<typename T> __forceinline QuaternionT<T> operator -( const QuaternionT<T>& a, const QuaternionT<T>& b ) { return QuaternionT<T>(a.r - b.r, a.i - b.i, a.j - b.j, a.k - b.k); }
template<typename T> __forceinline Vec3<T> operator *( const QuaternionT<T>& a, const Vec3<T> & b ) { return (a*QuaternionT<T>(b)*conj(a)).v(); }
template<typename T> __forceinline QuaternionT<T> operator *( const QuaternionT<T>& a, const QuaternionT<T>& b ) {
return QuaternionT<T>(a.r*b.r - a.i*b.i - a.j*b.j - a.k*b.k,
a.r*b.i + a.i*b.r + a.j*b.k - a.k*b.j,
a.r*b.j - a.i*b.k + a.j*b.r + a.k*b.i,
a.r*b.k + a.i*b.j - a.j*b.i + a.k*b.r);
}
template<typename T> __forceinline QuaternionT<T> operator /( const T & a, const QuaternionT<T>& b ) { return a*rcp(b); }
template<typename T> __forceinline QuaternionT<T> operator /( const QuaternionT<T>& a, const T & b ) { return a*rcp(b); }
template<typename T> __forceinline QuaternionT<T> operator /( const QuaternionT<T>& a, const QuaternionT<T>& b ) { return a*rcp(b); }
template<typename T> __forceinline QuaternionT<T>& operator +=( QuaternionT<T>& a, const T & b ) { return a = a+b; }
template<typename T> __forceinline QuaternionT<T>& operator +=( QuaternionT<T>& a, const QuaternionT<T>& b ) { return a = a+b; }
template<typename T> __forceinline QuaternionT<T>& operator -=( QuaternionT<T>& a, const T & b ) { return a = a-b; }
template<typename T> __forceinline QuaternionT<T>& operator -=( QuaternionT<T>& a, const QuaternionT<T>& b ) { return a = a-b; }
template<typename T> __forceinline QuaternionT<T>& operator *=( QuaternionT<T>& a, const T & b ) { return a = a*b; }
inline void rcVcross(float* dest, const float* v1, const float* v2)
{
dest[0] = v1[1]*v2[2] - v1[2]*v2[1];
dest[1] = v1[2]*v2[0] - v1[0]*v2[2];
dest[2] = v1[0]*v2[1] - v1[1]*v2[0];
}
/// Derives the dot product of two vectors. (@p v1 . @p v2)
/// @param[in] v1 A vector [(x, y, z)]
/// @param[in] v2 A vector [(x, y, z)]
/// @return The dot product.
inline float rcVdot(const float* v1, const float* v2)
{
return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2];
}
/// Performs a scaled vector addition. (@p v1 + (@p v2 * @p s))
/// @param[out] dest The result vector. [(x, y, z)]
/// @param[in] v1 The base vector. [(x, y, z)]
/// @param[in] v2 The vector to scale and add to @p v1. [(x, y, z)]
/// @param[in] s The amount to scale @p v2 by before adding to @p v1.
inline void rcVmad(float* dest, const float* v1, const float* v2, const float s)
{
dest[0] = v1[0]+v2[0]*s;
dest[1] = v1[1]+v2[1]*s;
dest[2] = v1[2]+v2[2]*s;
}
/// Performs a vector addition. (@p v1 + @p v2)
/// @param[out] dest The result vector. [(x, y, z)]
/// @param[in] v1 The base vector. [(x, y, z)]
/// @param[in] v2 The vector to add to @p v1. [(x, y, z)]
inline void rcVadd(float* dest, const float* v1, const float* v2)
{
dest[0] = v1[0]+v2[0];
dest[1] = v1[1]+v2[1];
dest[2] = v1[2]+v2[2];
}
/// Performs a vector subtraction. (@p v1 - @p v2)
/// @param[out] dest The result vector. [(x, y, z)]
/// @param[in] v1 The base vector. [(x, y, z)]
/// @param[in] v2 The vector to subtract from @p v1. [(x, y, z)]
inline void rcVsub(float* dest, const float* v1, const float* v2)
{
dest[0] = v1[0]-v2[0];
dest[1] = v1[1]-v2[1];
dest[2] = v1[2]-v2[2];
}
/// Selects the minimum value of each element from the specified vectors.
/// @param[in,out] mn A vector. (Will be updated with the result.) [(x, y, z)]
/// @param[in] v A vector. [(x, y, z)]
inline void rcVmin(float* mn, const float* v)
{
mn[0] = rcMin(mn[0], v[0]);
mn[1] = rcMin(mn[1], v[1]);
mn[2] = rcMin(mn[2], v[2]);
}
/// Selects the maximum value of each element from the specified vectors.
/// @param[in,out] mx A vector. (Will be updated with the result.) [(x, y, z)]
/// @param[in] v A vector. [(x, y, z)]
inline void rcVmax(float* mx, const float* v)
{
mx[0] = rcMax(mx[0], v[0]);
mx[1] = rcMax(mx[1], v[1]);
mx[2] = rcMax(mx[2], v[2]);
}
/// Performs a vector copy.
/// @param[out] dest The result. [(x, y, z)]
/// @param[in] v The vector to copy. [(x, y, z)]
inline void rcVcopy(float* dest, const float* v)
{
dest[0] = v[0];
dest[1] = v[1];
dest[2] = v[2];
}
/// Returns the distance between two points.
/// @param[in] v1 A point. [(x, y, z)]
/// @param[in] v2 A point. [(x, y, z)]
/// @return The distance between the two points.
inline float rcVdist(const float* v1, const float* v2)
{
float dx = v2[0] - v1[0];
float dy = v2[1] - v1[1];
float dz = v2[2] - v1[2];
return rcSqrt(dx*dx + dy*dy + dz*dz);
}
/// Returns the square of the distance between two points.
/// @param[in] v1 A point. [(x, y, z)]
/// @param[in] v2 A point. [(x, y, z)]
/// @return The square of the distance between the two points.
inline float rcVdistSqr(const float* v1, const float* v2)
{
float dx = v2[0] - v1[0];
float dy = v2[1] - v1[1];
float dz = v2[2] - v1[2];
return dx*dx + dy*dy + dz*dz;
}
/// Normalizes the vector.
/// @param[in,out] v The vector to normalize. [(x, y, z)]
inline void rcVnormalize(float* v)
{
float d = 1.0f / rcSqrt(rcSqr(v[0]) + rcSqr(v[1]) + rcSqr(v[2]));
v[0] *= d;
v[1] *= d;
v[2] *= d;
}
/// @}
/// @name Heightfield Functions
/// @see rcHeightfield
/// @{
/// Calculates the bounding box of an array of vertices.
/// @ingroup recast
/// @param[in] verts An array of vertices. [(x, y, z) * @p nv]
/// @param[in] numVerts The number of vertices in the @p verts array.
/// @param[out] minBounds The minimum bounds of the AABB. [(x, y, z)] [Units: wu]
/// @param[out] maxBounds The maximum bounds of the AABB. [(x, y, z)] [Units: wu]
void rcCalcBounds(const float* verts, int numVerts, float* minBounds, float* maxBounds);
/// Calculates the grid size based on the bounding box and grid cell size.
/// @ingroup recast
/// @param[in] minBounds The minimum bounds of the AABB. [(x, y, z)] [Units: wu]
/// @param[in] maxBounds The maximum bounds of the AABB. [(x, y, z)] [Units: wu]
/// @param[in] cellSize The xz-plane cell size. [Limit: > 0] [Units: wu]
/// @param[out] sizeX The width along the x-axis. [Limit: >= 0] [Units: vx]
/// @param[out] sizeZ The height along the z-axis. [Limit: >= 0] [Units: vx]
void rcCalcGridSize(const float* minBounds, const float* maxBounds, float cellSize, int* sizeX, int* sizeZ);
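// Example (sketch): deriving heightfield dimensions with the two helpers
// above, assuming verts, numVerts, and cellSize as inputs:
//   float bmin[3], bmax[3];
//   rcCalcBounds(verts, numVerts, bmin, bmax);
//   int sizeX = 0, sizeZ = 0;
//   rcCalcGridSize(bmin, bmax, cellSize, &sizeX, &sizeZ);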
/// Initializes a new heightfield.
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocHeightfield, rcHeightfield
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in,out] heightfield The allocated heightfield to initialize.
/// @param[in] sizeX The width of the field along the x-axis. [Limit: >= 0] [Units: vx]
/// @param[in] sizeZ The height of the field along the z-axis. [Limit: >= 0] [Units: vx]
/// @param[in] minBounds The minimum bounds of the field's AABB. [(x, y, z)] [Units: wu]
/// @param[in] maxBounds The maximum bounds of the field's AABB. [(x, y, z)] [Units: wu]
namespace VHACD {
// -- GODOT end --
/**@brief btVector3 can be used to represent 3D points and vectors.
* It has an unused w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by the user.
* Ideally, this class should be replaced by a platform-optimized SIMD version that keeps the data in registers.
*/
ATTRIBUTE_ALIGNED16(class)
btVector3
{
public:
#if defined(__SPU__) && defined(__CELLOS_LV2__)
btScalar m_floats[4];
public:
SIMD_FORCE_INLINE const vec_float4& get128() const
{
return *((const vec_float4*)&m_floats[0]);
}
public:
#else //__CELLOS_LV2__ __SPU__
#ifdef BT_USE_SSE // _WIN32
union {
__m128 mVec128;
btScalar m_floats[4];
};
SIMD_FORCE_INLINE __m128 get128() const
{
return mVec128;
}
SIMD_FORCE_INLINE void set128(__m128 v128)
{
mVec128 = v128;
}
#else
btScalar m_floats[4];
#endif
#endif //__CELLOS_LV2__ __SPU__
public:
/**@brief No initialization constructor */
SIMD_FORCE_INLINE btVector3() {}
/**@brief Constructor from scalars
* @param x X value
* @param y Y value
* @param z Z value
*/
SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z)
{
m_floats[0] = x;
m_floats[1] = y;
m_floats[2] = z;
m_floats[3] = btScalar(0.);
}
/**@brief Add a vector to this one
* @param v The vector to add to this one */
SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v)
{
m_floats[0] += v.m_floats[0];
m_floats[1] += v.m_floats[1];
m_floats[2] += v.m_floats[2];
return *this;
}
/**@brief Subtract a vector from this one
* @param v The vector to subtract */
SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v)
{
m_floats[0] -= v.m_floats[0];
m_floats[1] -= v.m_floats[1];
m_floats[2] -= v.m_floats[2];
return *this;
}
/**@brief Scale the vector
* @param s Scale factor */
SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s)
{
m_floats[0] *= s;
m_floats[1] *= s;
m_floats[2] *= s;
return *this;
}
/**@brief Inversely scale the vector
* @param s Scale factor to divide by */
SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s)
{
btFullAssert(s != btScalar(0.0));
return * this *= btScalar(1.0) / s;
}
/**@brief Return the dot product
* @param v The other vector in the dot product */
SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const
{
return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2];
}
/**@brief Return the length of the vector squared */
SIMD_FORCE_INLINE btScalar length2() const
{
return dot(*this);
}
/**@brief Return the length of the vector */
SIMD_FORCE_INLINE btScalar length() const
{
return btSqrt(length2());
}
/**@brief Return the distance squared between the ends of this and another vector
 * This is semantically treating the vector like a point */
SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const;
/**@brief Return the distance between the ends of this and another vector
 * This is semantically treating the vector like a point */
SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const;
SIMD_FORCE_INLINE btVector3& safeNormalize()
{
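// Scale down by the largest absolute component first, so that length() does
// not overflow or underflow for vectors with very large or very small values.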
btVector3 absVec = this->absolute();
int32_t maxIndex = absVec.maxAxis();
if (absVec[maxIndex] > 0) {
*this /= absVec[maxIndex];
return *this /= length();
}
setValue(1, 0, 0);
return *this;
}
/**@brief Normalize this vector
* x^2 + y^2 + z^2 = 1 */
SIMD_FORCE_INLINE btVector3& normalize()
{
return *this /= length();
}
/**@brief Return a normalized version of this vector */
SIMD_FORCE_INLINE btVector3 normalized() const;
/**@brief Return a rotated version of this vector
* @param wAxis The axis to rotate about
* @param angle The angle to rotate by */
SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const;
/**@brief Return the angle between this and another vector
* @param v The other vector */
SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const
{
btScalar s = btSqrt(length2() * v.length2());
btFullAssert(s != btScalar(0.0));
return btAcos(dot(v) / s);
}
/**@brief Return a vector with the absolute values of each element */
SIMD_FORCE_INLINE btVector3 absolute() const
{
return btVector3(
btFabs(m_floats[0]),
btFabs(m_floats[1]),
btFabs(m_floats[2]));
}
/**@brief Return the cross product between this and another vector
* @param v The other vector */
SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const
{
return btVector3(
m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1],
m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2],
m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]);
}
SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const
{
return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]);
}
/**@brief Return the axis with the smallest value
* Note return values are 0,1,2 for x, y, or z */
SIMD_FORCE_INLINE int32_t minAxis() const
{
return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);
}
/**@brief Return the axis with the largest value
* Note return values are 0,1,2 for x, y, or z */
SIMD_FORCE_INLINE int32_t maxAxis() const
{
return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);
}
SIMD_FORCE_INLINE int32_t furthestAxis() const
{
return absolute().minAxis();
}
SIMD_FORCE_INLINE int32_t closestAxis() const
{
return absolute().maxAxis();
}
/**@brief Return the vector scaled by s */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& v, const btScalar& s)
{
return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s);
}
/**@brief Return the vector scaled by s */
SIMD_FORCE_INLINE btVector3
operator*(const btScalar& s, const btVector3& v)
{
return v * s;
}
/**@brief Return the vector inversely scaled by s */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v, const btScalar& s)
{
btFullAssert(s != btScalar(0.0));
return v * (btScalar(1.0) / s);
}
/**@brief Return the vector inversely scaled by s */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v1, const btVector3& v2)
{
return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]);
}
/**@brief Return the dot product between two vectors */
SIMD_FORCE_INLINE btScalar
btDot(const btVector3& v1, const btVector3& v2)
{
return v1.dot(v2);
}
/**@brief Return the distance squared between two vectors */
SIMD_FORCE_INLINE btScalar
btDistance2(const btVector3& v1, const btVector3& v2)
{
return v1.distance2(v2);
}
/**@brief Return the distance between two vectors */
SIMD_FORCE_INLINE btScalar
btDistance(const btVector3& v1, const btVector3& v2)
{
return v1.distance(v2);
}
/**@brief Return the angle between two vectors */
SIMD_FORCE_INLINE btScalar
btAngle(const btVector3& v1, const btVector3& v2)
{
return v1.angle(v2);
}
/**@brief Return the cross product of two vectors */
SIMD_FORCE_INLINE btVector3
btCross(const btVector3& v1, const btVector3& v2)
{
return v1.cross(v2);
}
SIMD_FORCE_INLINE btScalar
btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3)
{
return v1.triple(v2, v3);
}
/**@brief Return the linear interpolation between two vectors
* @param v1 One vector
* @param v2 The other vector
 * @param t The interpolation ratio (t = 0 => return v1, t = 1 => return v2) */
SIMD_FORCE_INLINE btVector3
lerp(const btVector3& v1, const btVector3& v2, const btScalar& t)
{
return v1.lerp(v2, t);
}
SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const
{
return (v - *this).length2();
}
SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const
{
return (v - *this).length();
}
SIMD_FORCE_INLINE btVector3 btVector3::normalized() const
{
return *this / length();
}
SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const
{
// wAxis must be a unit length vector
btVector3 o = wAxis * wAxis.dot(*this);
btVector3 x = *this - o;
btVector3 y;
y = wAxis.cross(*this);
return (o + x * btCos(angle) + y * btSin(angle));
}
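// Illustrative example (not part of the class itself): rotating a point a
// quarter turn around the Y axis; SIMD_HALF_PI is assumed from btScalar.h.
//
//   btVector3 p(1, 0, 0);
//   btVector3 r = p.rotate(btVector3(0, 1, 0), SIMD_HALF_PI); // ~(0, 0, -1)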
class btVector4 : public btVector3 {
public:
SIMD_FORCE_INLINE btVector4() {}
SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w)
: btVector3(x, y, z)
{
m_floats[3] = w;
}
SIMD_FORCE_INLINE btVector4 absolute4() const
{
return btVector4(
btFabs(m_floats[0]),
btFabs(m_floats[1]),
btFabs(m_floats[2]),
btFabs(m_floats[3]));
}
btScalar getW() const { return m_floats[3]; }
SIMD_FORCE_INLINE int32_t maxAxis4() const
{
int32_t maxIndex = -1;
btScalar maxVal = btScalar(-BT_LARGE_FLOAT);
if (m_floats[0] > maxVal) {
maxIndex = 0;
maxVal = m_floats[0];
}
if (m_floats[1] > maxVal) {
maxIndex = 1;
maxVal = m_floats[1];
}
if (m_floats[2] > maxVal) {
maxIndex = 2;
maxVal = m_floats[2];
}
if (m_floats[3] > maxVal) {
maxIndex = 3;
}
return maxIndex;
}
SIMD_FORCE_INLINE int32_t minAxis4() const
{
int32_t minIndex = -1;
btScalar minVal = btScalar(BT_LARGE_FLOAT);
if (m_floats[0] < minVal) {
minIndex = 0;
minVal = m_floats[0];
}
if (m_floats[1] < minVal) {
minIndex = 1;
minVal = m_floats[1];
}
if (m_floats[2] < minVal) {
minIndex = 2;
minVal = m_floats[2];
}
if (m_floats[3] < minVal) {
minIndex = 3;
}
return minIndex;
}
SIMD_FORCE_INLINE int32_t closestAxis4() const
{
return absolute4().maxAxis4();
}
/**@brief Set x,y,z and zero w
* @param x Value of x
* @param y Value of y
* @param z Value of z
*/
/* void getValue(btScalar *m) const
{
m[0] = m_floats[0];
m[1] = m_floats[1];
m[2] =m_floats[2];
}
*/
/**@brief Set the values
* @param x Value of x
* @param y Value of y
* @param z Value of z
* @param w Value of w
*/
SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w)
{
m_floats[0] = x;
m_floats[1] = y;
m_floats[2] = z;
m_floats[3] = w;
}
};
///btSwapScalarEndian swaps scalar endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal)
{
#ifdef BT_USE_DOUBLE_PRECISION
unsigned char* dest = (unsigned char*)&destVal;
unsigned char* src = (unsigned char*)&sourceVal;
dest[0] = src[7];
dest[1] = src[6];
dest[2] = src[5];
dest[3] = src[4];
dest[4] = src[3];
dest[5] = src[2];
dest[6] = src[1];
dest[7] = src[0];
#else
unsigned char* dest = (unsigned char*)&destVal;
unsigned char* src = (unsigned char*)&sourceVal;
dest[0] = src[3];
dest[1] = src[2];
dest[2] = src[1];
dest[3] = src[0];
#endif //BT_USE_DOUBLE_PRECISION
}
///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec)
{
for (int32_t i = 0; i < 4; i++) {
btSwapScalarEndian(sourceVec[i], destVec[i]);
}
}
///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector)
{
btVector3 swappedVec;
for (int32_t i = 0; i < 4; i++) {
btSwapScalarEndian(vector[i], swappedVec[i]);
}
vector = swappedVec;
}
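// Note: these helpers unconditionally reverse byte order. A caller is expected
// to apply them only when the serialized data's endianness differs from the
// host's, e.g. (reader and flags below are hypothetical):
//
//   btVector3 v = readVectorFromStream();
//   if (streamIsBigEndian != hostIsBigEndian)
//       btUnSwapVector3Endian(v);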
void fm_catmullRom(REAL *out_vector,const REAL *p1,const REAL *p2,const REAL *p3,const REAL *p4, const REAL s)
{
REAL s_squared = s * s;
REAL s_cubed = s_squared * s;
REAL coefficient_p1 = -s_cubed + 2*s_squared - s;
REAL coefficient_p2 = 3 * s_cubed - 5 * s_squared + 2;
REAL coefficient_p3 = -3 * s_cubed +4 * s_squared + s;
REAL coefficient_p4 = s_cubed - s_squared;
out_vector[0] = (coefficient_p1 * p1[0] + coefficient_p2 * p2[0] + coefficient_p3 * p3[0] + coefficient_p4 * p4[0])*0.5f;
out_vector[1] = (coefficient_p1 * p1[1] + coefficient_p2 * p2[1] + coefficient_p3 * p3[1] + coefficient_p4 * p4[1])*0.5f;
out_vector[2] = (coefficient_p1 * p1[2] + coefficient_p2 * p2[2] + coefficient_p3 * p3[2] + coefficient_p4 * p4[2])*0.5f;
}
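// Illustrative usage sketch: sampling the Catmull-Rom segment between p2 and
// p3 at evenly spaced parameter values (p1..p4 are assumed caller-provided).
//
//   REAL point[3];
//   for (int i = 0; i <= 10; i++)
//   {
//     fm_catmullRom(point, p1, p2, p3, p4, (REAL)i / 10);
//     // ... use point ...
//   }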
bool fm_intersectAABB(const REAL *bmin1,const REAL *bmax1,const REAL *bmin2,const REAL *bmax2)
{
if ((bmin1[0] > bmax2[0]) || (bmin2[0] > bmax1[0])) return false;
if ((bmin1[1] > bmax2[1]) || (bmin2[1] > bmax1[1])) return false;
if ((bmin1[2] > bmax2[2]) || (bmin2[2] > bmax1[2])) return false;
return true;
}
bool fm_insideAABB(const REAL *obmin,const REAL *obmax,const REAL *tbmin,const REAL *tbmax) // test if bounding box tbmin/tmbax is fully inside obmin/obmax
{
bool ret = false;
if ( tbmax[0] <= obmax[0] &&
tbmax[1] <= obmax[1] &&
tbmax[2] <= obmax[2] &&
tbmin[0] >= obmin[0] &&
tbmin[1] >= obmin[1] &&
tbmin[2] >= obmin[2] ) ret = true;
return ret;
}
// Reference, from Stan Melax in Game Programming Gems I
// Quaternion q;
// vector3 c = CrossProduct(v0,v1);
// REAL d = DotProduct(v0,v1);
// REAL s = (REAL)sqrt((1+d)*2);
// q.x = c.x / s;
// q.y = c.y / s;
// q.z = c.z / s;
// q.w = s /2.0f;
// return q;
void fm_rotationArc(const REAL *v0,const REAL *v1,REAL *quat)
{
REAL cross[3];
fm_cross(cross,v0,v1);
REAL d = fm_dot(v0,v1);
	if( d<= -0.99999f ) // nearly opposite vectors: rotate 180 degrees about an arbitrary perpendicular axis
{
if ( fabsf((float)v0[0]) < 0.1f )
{
quat[0] = 0;
quat[1] = v0[2];
quat[2] = -v0[1];
quat[3] = 0;
}
else
{
quat[0] = v0[1];
quat[1] = -v0[0];
quat[2] = 0;
quat[3] = 0;
}
REAL magnitudeSquared = quat[0]*quat[0] + quat[1]*quat[1] + quat[2]*quat[2] + quat[3]*quat[3];
REAL magnitude = sqrtf((float)magnitudeSquared);
REAL recip = 1.0f / magnitude;
quat[0]*=recip;
quat[1]*=recip;
quat[2]*=recip;
quat[3]*=recip;
}
else
{
REAL s = (REAL)sqrt((1+d)*2);
REAL recip = 1.0f / s;
quat[0] = cross[0] * recip;
quat[1] = cross[1] * recip;
quat[2] = cross[2] * recip;
quat[3] = s * 0.5f;
}
}
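// Illustrative usage sketch: the quaternion (x, y, z, w) rotating the +Y axis
// onto a surface normal. Both inputs are assumed normalized, per the Melax
// derivation referenced above.
//
//   REAL up[3] = { 0, 1, 0 };
//   REAL q[4];
//   fm_rotationArc(up, normal, q);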
REAL fm_distancePointLineSegment(const REAL *Point,const REAL *LineStart,const REAL *LineEnd,REAL *intersection,LineSegmentType &type,REAL epsilon)
{
REAL ret;
REAL LineMag = fm_distance( LineEnd, LineStart );
if ( LineMag > 0 )
{
REAL U = ( ( ( Point[0] - LineStart[0] ) * ( LineEnd[0] - LineStart[0] ) ) + ( ( Point[1] - LineStart[1] ) * ( LineEnd[1] - LineStart[1] ) ) + ( ( Point[2] - LineStart[2] ) * ( LineEnd[2] - LineStart[2] ) ) ) / ( LineMag * LineMag );
if( U < 0.0f || U > 1.0f )
{
REAL d1 = fm_distanceSquared(Point,LineStart);
REAL d2 = fm_distanceSquared(Point,LineEnd);
if ( d1 <= d2 )
{
ret = (REAL)sqrt(d1);
intersection[0] = LineStart[0];
intersection[1] = LineStart[1];
intersection[2] = LineStart[2];
type = LS_START;
}
else
{
ret = (REAL)sqrt(d2);
intersection[0] = LineEnd[0];
intersection[1] = LineEnd[1];
intersection[2] = LineEnd[2];
type = LS_END;
}
}
else
{
intersection[0] = LineStart[0] + U * ( LineEnd[0] - LineStart[0] );
intersection[1] = LineStart[1] + U * ( LineEnd[1] - LineStart[1] );
intersection[2] = LineStart[2] + U * ( LineEnd[2] - LineStart[2] );
ret = fm_distance(Point,intersection);
REAL d1 = fm_distanceSquared(intersection,LineStart);
REAL d2 = fm_distanceSquared(intersection,LineEnd);
REAL mag = (epsilon*2)*(epsilon*2);
if ( d1 < mag ) // if within the epsilon threshold of the start, treat it as the 'start'
{
type = LS_START;
}
else if ( d2 < mag )
{
type = LS_END;
}
else
{
type = LS_MIDDLE;
}
}
}
else
{
ret = LineMag;
intersection[0] = LineEnd[0];
intersection[1] = LineEnd[1];
intersection[2] = LineEnd[2];
type = LS_END;
}
return ret;
}
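// Illustrative usage sketch: classifying where the closest point on a segment
// falls. The inputs `pt`, `a` and `b` are assumed caller-provided.
//
//   REAL hit[3];
//   LineSegmentType side;
//   REAL dist = fm_distancePointLineSegment(pt, a, b, hit, side, (REAL)0.001);
//   if (side == LS_MIDDLE) { /* the projection landed strictly inside */ }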
#ifndef BEST_FIT_PLANE_H
#define BEST_FIT_PLANE_H
template <class Type> class Eigen
{
public:
float get_scanning_progress() const;
void scan();
void scan_changes();
void update_file(const String &p_file);
void update_files(const Vector<String> &p_script_paths);
HashSet<String> get_valid_extensions() const;
void register_global_class_script(const String &p_search_path, const String &p_target_path);
EditorFileSystemDirectory *get_filesystem_path(const String &p_path);
String get_file_type(const String &p_file) const;
EditorFileSystemDirectory *find_file(const String &p_file, int *r_index) const;
void reimport_files(const Vector<String> &p_files);
Error reimport_append(const String &p_file, const HashMap<StringName, Variant> &p_custom_options, const String &p_custom_importer, Variant p_generator_parameters);
void reimport_file_with_custom_parameters(const String &p_file, const String &p_importer, const HashMap<StringName, Variant> &p_custom_params);
bool is_group_file(const String &p_path) const;
void move_group_file(const String &p_path, const String &p_new_path);
Error make_dir_recursive(const String &p_path, const String &p_base_path = String());
Error copy_file(const String &p_from, const String &p_to);
Error copy_directory(const String &p_from, const String &p_to);
static bool _should_skip_directory(const String &p_path);
void add_import_format_support_query(Ref<EditorFileSystemImportFormatSupportQuery> p_query);
void remove_import_format_support_query(Ref<EditorFileSystemImportFormatSupportQuery> p_query);
EditorFileSystem();
~EditorFileSystem();
};
#endif // EDITOR_FILE_SYSTEM_H
/**************************************************************************/
/* import_defaults_editor.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifndef IMPORT_DEFAULTS_EDITOR_H
#define IMPORT_DEFAULTS_EDITOR_H
#include "scene/gui/box_container.h"
#include "scene/gui/button.h"
#include "scene/gui/option_button.h"
class ImportDefaultsEditorSettings;
class EditorInspector;
class ImportDefaultsEditor : public VBoxContainer {
GDCLASS(ImportDefaultsEditor, VBoxContainer)
OptionButton *importers = nullptr;
Button *save_defaults = nullptr;
Button *reset_defaults = nullptr;
EditorInspector *inspector = nullptr;
ImportDefaultsEditorSettings *settings = nullptr;
void _update_importer();
void _importer_selected(int p_index);
void _reset();
void _save();
protected:
void _notification(int p_what);
public:
void clear();
ImportDefaultsEditor();
~ImportDefaultsEditor();
};
#endif // IMPORT_DEFAULTS_EDITOR_H
/**************************************************************************/
/* editor_import_plugin.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#ifndef EDITOR_IMPORT_PLUGIN_H
#define EDITOR_IMPORT_PLUGIN_H
#include "core/io/resource_importer.h"
#include "core/variant/typed_array.h"
class EditorImportPlugin : public ResourceImporter {
GDCLASS(EditorImportPlugin, ResourceImporter);
protected:
static void _bind_methods();
GDVIRTUAL0RC(String, _get_importer_name)
GDVIRTUAL0RC(String, _get_visible_name)
GDVIRTUAL0RC(int, _get_preset_count)
GDVIRTUAL1RC(String, _get_preset_name, int)
GDVIRTUAL0RC(Vector<String>, _get_recognized_extensions)
GDVIRTUAL2RC(TypedArray<Dictionary>, _get_import_options, String, int)
GDVIRTUAL0RC(String, _get_save_extension)
GDVIRTUAL0RC(String, _get_resource_type)
GDVIRTUAL0RC(float, _get_priority)
GDVIRTUAL0RC(int, _get_import_order)
GDVIRTUAL3RC(bool, _get_option_visibility, String, StringName, Dictionary)
GDVIRTUAL5RC(Error, _import, String, String, Dictionary, TypedArray<String>, TypedArray<String>)
GDVIRTUAL0RC(bool, _can_import_threaded)
Error _append_import_external_resource(const String &p_file, const Dictionary &p_custom_options = Dictionary(), const String &p_custom_importer = String(), Variant p_generator_parameters = Variant());
public:
EditorImportPlugin();
virtual String get_importer_name() const override;
virtual String get_visible_name() const override;
virtual void get_recognized_extensions(List<String> *p_extensions) const override;
virtual String get_preset_name(int p_idx) const override;
virtual int get_preset_count() const override;
virtual String get_save_extension() const override;
virtual String get_resource_type() const override;
virtual float get_priority() const override;
virtual int get_import_order() const override;
virtual void get_import_options(const String &p_path, List<ImportOption> *r_options, int p_preset) const override;
virtual bool get_option_visibility(const String &p_path, const String &p_option, const HashMap<StringName, Variant> &p_options) const override;
virtual Error import(const String &p_source_file, const String &p_save_path, const HashMap<StringName, Variant> &p_options, List<String> *r_platform_variants, List<String> *r_gen_files, Variant *r_metadata = nullptr) override;
virtual bool can_import_threaded() const override;
Error append_import_external_resource(const String &p_file, const HashMap<StringName, Variant> &p_custom_options = HashMap<StringName, Variant>(), const String &p_custom_importer = String(), Variant p_generator_parameters = Variant());
};
#endif // EDITOR_IMPORT_PLUGIN_H
defaults:
4:
templates: export_templates.tpz
editor:
android.apk: android_editor.apk
android.horizonos: android_editor_horizonos.apk
linux.64: linux.x86_64.zip
linux.32: linux.x86_32.zip
linux.arm64: linux.arm64.zip
linux.arm32: linux.arm32.zip
macos.universal: macos.universal.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
windows.arm64: windows_arm64.exe.zip
web: web_editor.zip
mono:
templates: mono_export_templates.tpz
editor:
linux.64: mono_linux_x86_64.zip
linux.32: mono_linux_x86_32.zip
linux.arm64: mono_linux_arm64.zip
linux.arm32: mono_linux_arm32.zip
macos.universal: mono_macos.universal.zip
windows.64: mono_win64.zip
windows.32: mono_win32.zip
windows.arm64: mono_windows_arm64.zip
extras:
aar_library: template_release.aar
3:
templates: export_templates.tpz
editor:
android.apk: android_editor.apk
linux.64: x11.64.zip
linux.32: x11.32.zip
macos.universal: osx.universal.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
linux_server.headless.64: linux_headless.64.zip
linux_server.64: linux_server.64.zip
web: web_editor.zip
extras:
aar_library: release.aar
mono:
templates: mono_export_templates.tpz
editor:
linux.64: mono_x11_64.zip
linux.32: mono_x11_32.zip
macos.universal: mono_osx.universal.zip
windows.64: mono_win64.zip
windows.32: mono_win32.zip
linux_server.headless.64: mono_linux_headless_64.zip
linux_server.64: mono_linux_server_64.zip
extras:
aar_library: mono.release.aar
2:
templates: export_templates.tpz
editor:
linux.64: x11.64.zip
linux.32: x11.32.zip
macos.universal: osx.fat.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
linux_server.64: linux_server.64.zip
1:
templates: export_templates.tpz
editor:
linux.64: x11.64.zip
linux.32: x11.32.zip
macos.universal: osx32.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
linux_server.64: linux_server.64.zip
overrides:
# Mono version of Godot 4 was only introduced in 4.0 alpha 17.
- version: 4
range:
- "4.0-alpha1"
- "4.0-alpha16"
config:
templates: export_templates.tpz
editor:
android.apk: android_editor.apk
linux.64: linux.x86_64.zip
linux.32: linux.x86_32.zip
macos.universal: macos.universal.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
web: web_editor.zip
extras:
aar_library: template_release.aar
# Godot 4.2 beta 5 introduced Linux ARM builds.
- version: 4
range:
- "4.0-alpha17"
- "4.2-beta4"
config:
templates: export_templates.tpz
editor:
android.apk: android_editor.apk
linux.64: linux.x86_64.zip
linux.32: linux.x86_32.zip
macos.universal: macos.universal.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
web: web_editor.zip
mono:
templates: mono_export_templates.tpz
editor:
linux.64: mono_linux_x86_64.zip
linux.32: mono_linux_x86_32.zip
macos.universal: mono_macos.universal.zip
windows.64: mono_win64.zip
windows.32: mono_win32.zip
extras:
aar_library: template_release.aar
# Godot 4.3 RC 1 introduced Windows ARM builds.
- version: 4
range:
- "4.2-beta5"
- "4.3-beta3"
config:
templates: export_templates.tpz
editor:
android.apk: android_editor.apk
linux.64: linux.x86_64.zip
linux.32: linux.x86_32.zip
linux.arm64: linux.arm64.zip
linux.arm32: linux.arm32.zip
macos.universal: macos.universal.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
web: web_editor.zip
mono:
templates: mono_export_templates.tpz
editor:
linux.64: mono_linux_x86_64.zip
linux.32: mono_linux_x86_32.zip
linux.arm64: mono_linux_arm64.zip
linux.arm32: mono_linux_arm32.zip
macos.universal: mono_macos.universal.zip
windows.64: mono_win64.zip
windows.32: mono_win32.zip
extras:
aar_library: template_release.aar
# Godot 4.4 dev 2 introduced Android Horizon OS builds.
- version: 4
range:
- "4.3-rc1"
- "4.4-dev1"
config:
templates: export_templates.tpz
editor:
android.apk: android_editor.apk
linux.64: linux.x86_64.zip
linux.32: linux.x86_32.zip
linux.arm64: linux.arm64.zip
linux.arm32: linux.arm32.zip
macos.universal: macos.universal.zip
windows.64: win64.exe.zip
windows.32: win32.exe.zip
windows.arm64: windows_arm64.exe.zip
web: web_editor.zip
mono:
templates: mono_export_templates.tpz
editor:
linux.64: mono_linux_x86_64.zip
linux.32: mono_linux_x86_32.zip
linux.arm64: mono_linux_arm64.zip
linux.arm32: mono_linux_arm32.zip
macos.universal: mono_macos.universal.zip
windows.64: mono_win64.zip
windows.32: mono_win32.zip
windows.arm64: mono_windows_arm64.zip
extras:
aar_library: template_release.aar
# New asset workflow
Godot 3.0 has changed how the asset pipeline works. We now use the more familiar scheme of automatically importing assets: simply drop an asset into the project folder and it will automatically be imported with configurable default parameters. Import options can still be changed after importing.

One interesting point about how this works is that copying the import folder between computers works perfectly: the editor will not attempt to reimport something until it has really changed.
Another big plus of this new system, besides improved ease of use, is that running on a device with a networked filesystem (for very fast testing times) works much better than before. Godot will pick the right compression for textures when importing for mobile, and supply them over the network.
The 3D asset workflow has also seen great improvements. It is possible to either import a scene as a single file, or to split it into multiple instantiated subscenes, keep materials, meshes and animations external, etc. Changes to resources can also be merged.
<a id="assets-gltf2"></a>
#### glTF 2.0 support
Godot now supports importing glTF 2.0 scenes. This is a new open standard by Khronos which we [encourage you](/article/we-should-all-use-gltf-20-export-3d-assets-game-engines) to use and support.

The importer is new and likely not as mature as the Collada importer, but it will get better over time.
<a id="assets-obj"></a>
#### Improved OBJ support
The venerable OBJ format is now much better supported. Materials can be read from it and importing as a full scene is now also possible.
<a id="assets-svg"></a>
#### SVG support
Daniel Ramirez ([djrm](https://github.com/djrm)) implemented importing SVG as bitmaps (with customizable resolution). This functionality is also used to better support HiDPI modes, as editor icons are converted to native resolution when the editor starts up.
This does not let you use SVGs directly as 2D meshes yet, but it's on the roadmap for future releases.
<a id="gdnative"></a>
## GDNative

GDNative is our new framework for extending Godot via external libraries. It was mostly developed by [karroffel](https://github.com/karroffel), and it's truly amazing.
Using GDNative, it's possible to easily extend Godot in C/C++ without recompiling the engine, on any platform. This also means that it's easy to bundle external libraries (such as OpenVR, Steam, Kinect, SQLite, etc.), or provide support for video/audio codecs, VR, etc. as pluggable libraries.
But that's not all. GDNative allows setting up extra scripting languages and using them on the fly without recompiling the engine, with pretty much native performance. Currently, work is in an advanced state for community-maintained [Python](https://github.com/touilleMan/godot-python) (via the PluginScript interface), [Nim](https://pragmagic.github.io/godot-nim/master/index.html) and [D](https://github.com/GodotNativeTools/godot-d) support, as well as, of course, official [C++](https://github.com/GodotNativeTools/godot-cpp) bindings; others might follow if community members are interested in working on them.
You can read more about it on its [original announcement](/article/dlscript-here) and a later [in-depth look at its architecture](/article/look-gdnative-architecture).
<a id="csharp"></a>
## Mono / C# support

Thanks to a generous grant from Microsoft, Ignacio Roldán Etcheverry ([neikeq](https://github.com/neikeq)) did a fantastic job and implemented Mono support in Godot.
It is now possible to fully script [Godot using C#](/article/introducing-csharp-godot), using your favorite IDE and the latest version of the language.
Due to popular demand, we also implemented an API mostly conformant with the C# conventions, so for C# users, the API is mostly PascalCase (instead of snake_case). The generated C# code API includes the full documentation embedded, so code completion works very well with it.
**Note:** The language support is mostly complete and fully usable, but it will continue improving over the next months. The main missing feature as of 3.0 is the ability to export games coded in C#; as such, it's not fully usable in production yet. There's not much left to implement for this to work though, and it will be available in Godot 3.0.1 within a few weeks.
So as not to impose the additional dependency on the Mono SDK on users of other scripting languages such as GDScript or VisualScript, the C# support comes in a separate build of Godot (labeled the "mono" build).
<a id="visualscript"></a>
## Visual Scripting
Godot 3.0 also comes with a brand new visual scripting language (originally named VisualScript), in the typical "boxes and connections" fashion. The default implementation is rather low-level, but is extensible enough for users to create more high-level behaviors.

Visual scripting is ideal for non-programmers, or for programmers exposing behaviors to game designers, artists, etc.
One of the nice features of our implementation is that it's possible to drag elements from most Godot panels (filesystem, scene, properties, etc.) to the canvas, greatly improving usability.
Since this is the first stable release shipping with VisualScript, we are looking forward to your feedback to continue improving it further in future releases.
<a id="gdscript"></a>
## GDScript
GDScript has seen many improvements since 2.1. The main one is that the API has mostly changed from using functions to properties. This makes it less verbose and easier for newcomers, and makes it simpler to find the right property in the documentation.
The `get_node()` function also got syntactic sugar to obtain nodes in the local scene tree by writing less code, using the `$` alias.

GDScript has also seen a huge performance boost thanks to the work of HP van Braam ([hpvb/TMM](https://github.com/hpvb)), which makes its execution much faster.
A new pattern matching API is also available, courtesy of [karroffel](https://github.com/karroffel).
Code completion has also seen a great increase in accuracy.
<a id="audio"></a>
## New audio engine
Godot 3.0 comes with a brand new audio engine; the old one has been completely wiped out. This version is entirely focused on AudioStreams (samples are no longer supported). Streams (supported as .wav and .ogg files) can be played in real-time.
Stream players can now send their output to buses in a rack, allowing very high efficiency and freedom in game sound design:

There is also a large library of built-in sound effects that can be put in each channel:

Positional audio is also now fully supported, including stereo, 5.1 and 7.1 speaker configurations. 3D audio can be sent to any channel, and areas will also capture it and send it to custom channels (with split reverb).
This allows different areas to have different reverberation and effects (reverb is not the same in small and large rooms), without having to tweak snapshots like other engines do. Here is an example using the older 3D platformer demo:
<iframe width="560" height="315" src="https://www.youtube.com/embed/aRwCxMYSIk8" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
<a id="vr"></a>
##
---
title: "Maintenance release: Godot 4.2.1"
excerpt: "This first maintenance release fixes a number of platform compatibility issues introduced in Godot 4.2, which should make it much easier to upgrade from 4.1 or start new projects on all platforms."
categories: ["release"]
author: Rémi Verschelde
image: /storage/blog/covers/maintenance-release-godot-4-2-1.webp
image_caption_title: Voice of Flowers
image_caption_description: A game by Tomasz Chabora
date: 2023-12-12 13:00:00
---
We released [Godot 4.2](/article/godot-4-2-arrives-in-style/) two weeks ago, with major improvements and bug fixes all around the engine. Like any big feature release, it had a few rough edges which we've been focusing on addressing in the past couple of weeks. This allows us to already publish this first maintenance release, Godot 4.2.1, which irons out some of those issues while fully preserving compatibility.
Some of the most important fixes in this release are:
- For the GL Compatibility renderer on macOS, we switched from the Metal ANGLE backend back to native OpenGL drivers. Our hope was that ANGLE's Metal backend would make the Compatibility renderer future proof (as Apple deprecated their native OpenGL support), and fix some known driver bugs with OpenGL drivers on macOS. It turns out that ANGLE's Metal backend brings more issues than it solves, so we rolled back that change of default backend ([GH-85785](https://github.com/godotengine/godot/pull/85785)).
- Still on macOS, the new iOS one click deploy feature requires Xcode to be installed, and would use `xcode-select` to check that when the editor starts. This could trigger an install dialog that would make Godot appear frozen, without making the issue explicit to users. We switched to use `mdfind` to check if Xcode is installed, which should solve this issue ([GH-85774](https://github.com/godotengine/godot/pull/85774)).
- A number of bugs have been solved for the Vulkan renderers on Android, which could cause crashes or corrupted meshes ([GH-84852](https://github.com/godotengine/godot/pull/84852)). This might also have solved issues with garbled tilemap rendering on some Android devices, though we still need confirmation on that.
- Many users of the [Godot Jolt](https://github.com/godot-jolt/godot-jolt/) addon in 4.1 experienced the editor crashing when upgrading their project to 4.2, due to a bug in older versions of Godot Jolt which made it incompatible with Godot 4.2. Given how widespread the issue seems to be, we decided to add a hack for Godot Jolt specifically to prevent loading older versions which are known incompatible with Godot 4.2 ([GH-85779](https://github.com/godotengine/godot/pull/85779)). After successfully loading a project with Godot Jolt disabled, you should delete the old version you have installed, and reinstall the latest version (0.11.0 at the time of writing) which is compatible with Godot 4.2.
- A regression with handling of TileMap occluders was also fixed ([GH-85893](https://github.com/godotengine/godot/pull/85893)).
- Recent Emscripten releases changed their default stack size, which caused issues for Web export in Godot 4.2 when using certain APIs. We added the needed linker flags to restore the behavior from older Emscripten versions, ensuring that Godot can run successfully after being compiled by the latest Emscripten releases ([GH-86036](https://github.com/godotengine/godot/pull/86036)).
[**Download Godot 4.2.1 now**](/download/) or try the [online version of the Godot editor](https://editor.godotengine.org/4.2.1.stable/).
{% include articles/download_card.html version="4.2.1" release="stable" article=page %}
*The illustration picture used in this announcement is from* [**Voice of Flowers**](https://store.steampowered.com/app/2609560?utm_source=GodotWebsite&utm_medium=blog&utm_campaign=421), *a metroidvania that takes great inspiration from Mario games, developed in Godot 4 by [Tomasz Chabora](https://twitter.com/KoBeWi_/) ([KoBeWi](https://github.com/KoBeWi/)) – one of Godot's most prolific maintainers! You can wishlist the game [on Steam](https://store.steampowered.com/app/2609560?utm_source=GodotWebsite&utm_medium=blog&utm_campaign=421), play the latest demo [on itch.io](https://kobewi.itch.io/voice-of-flowers), and follow development on [Discord](https://discord.gg/PGhFXeHApR) and [Twitter](https://twitter.com/KoBeWi_/).*
## Changes
**42 contributors** submitted around **74 improvements** for this release. You can review the complete list of changes with our [interactive changelog](https://godotengine.github.io/godot-interactive-changelog/#4.2.1), which contains links to relevant commits and PRs for this and every previous release. Here is the complete list of changes in this release:
-
### Make rendering always happen in Linear colorspace
Texture and color information edited by users exists only in the sRGB colorspace. This happens because monitor colors are adjusted by a gamma function, raising them to a power of roughly 2.2 or 2.4.
To make lighting more realistic, all computations must happen in a linear color space, with the result converted back to gamma space at the end via tonemapping.
Godot 2.x already supported linear-space rendering, but it was optional. In 3.0, as we are aiming for a more realistic and higher quality backend, the only supported rendering mode is linear.
### Investigate Physically Based Rendering (PBR), using GGX and alternatives
Thorough investigation was carried out on more modern rendering techniques for Godot.
As a result, we decided to use the [Disney PBR](https://disney-animation.s3.amazonaws.com/library/s2012_pbs_disney_brdf_notes_v2.pdf) specification.
Godot will use a similar parameter set for materials and shaders.
### Investigate cubemap filtering
The most common way to implement PBR in real-time is to use a pre-filtered cubemap for material roughness. This makes the reflected light more or less smoothed on demand:

Cubemap filtering was implemented and it's working well, but doubts arose as to whether using this or dual paraboloid maps is better, as cubemaps don't blend well between cube sides on several platforms.
### Create a new FixedMaterial that reflects PBR capabilities
A new FixedSceneMaterial resource was created, which allows editing simple materials without having to edit shaders manually. It also has the advantage of reusing shaders for similar material configurations:

### Implement PBR
The minimum required parameters for PBR are implemented and working:
* Albedo
* Specular
* Roughness
* Environment (skybox)
### Convert gizmos to use the new FixedMaterial
All gizmos were converted to use the new FixedSceneMaterial, as mentioned before.
### Add Lighting using PBR
Additive lighting has been added for the PBR backend (in Milestone 3, clustered lighting will be added).

### Investigate a ShadowAtlas allocation strategy
Godot 2.0 used individual textures for each shadow map. In the wake of more modern techniques such as clustered rendering, all shadowmaps must be contained within a single texture.
Research was first done into dynamic allocation strategies for light shadows within a shadow atlas, but nothing useful was found. Every dynamic scheme implies moving shadowmaps around when no more space is available, which incurs a considerable cost.
In the end, a more static approach was chosen. The shadow atlas will be divided into 4 "Quadrants" and the user will be able to specify how they want each of them subdivided. A default subdivision should cover most use cases:

But the possibility is open for developers to tweak this subdivision for games that might look better with a different scheme.
The logic to tell which cell size must be used for which light is straightforward. Every time the camera moves, each visible light computes a "coverage" value, which represents its size on screen, for example:
```
average_screen_size = (screen_width + screen_height) / 2
coverage = diameter_in_screen_pixels / average_screen_size
```
The coverage is then a value ranging from 0 to 1. To determine which cell size must be used, the following logic applies:
```
desired_cell_size = nearest_power_of_2(largest_cell_size * coverage)
```
## Future
This has been our first progress report on the new renderer for Godot 3.0, we hope everything was clear!
## Seeing the code
If you are interested in seeing what each feature looks like in the code, you can check the [*gles3* branch](https://github.com/godotengine/godot/commits/gles3) on GitHub.
---
title: "GDScript progress report: New GDScript is now merged"
excerpt: "New GDScript code is now merged. Here I talk a bit of what has changed, report what else I did this month and talk a bit about my current work and plans for the future."
categories: ["progress-report"]
author: George Marques
image: /storage/app/uploads/public/5f2/966/b6d/5f2966b6dca1a217164497.png
date: 2020-08-04 00:00:00
---
As some of you might be aware, the refactor that I have been working on lately is now [merged](https://github.com/godotengine/godot/pull/40598) into the `master` branch. This is the work explained in previous progress reports.
*See other articles in this Godot 4.0 GDScript series:*
1. [GDScript progress report: Writing a tokenizer](https://godotengine.org/article/gdscript-progress-report-writing-tokenizer)
2. [GDScript progress report: Writing a new parser](https://godotengine.org/article/gdscript-progress-report-writing-new-parser)
3. [GDScript progress report: Type checking is back](https://godotengine.org/article/gdscript-progress-report-type-checking-back)
4. (you are here) [GDScript progress report: New GDScript is now merged](https://godotengine.org/article/gdscript-progress-report-new-gdscript-now-merged)
5. [GDScript progress report: Typed instructions](https://godotengine.org/article/gdscript-progress-report-typed-instructions)
6. [GDScript progress report: Feature-complete for 4.0](https://godotengine.org/article/gdscript-progress-report-feature-complete-40)
## Main changes
While I did already open a pull request to [update the documentation](https://github.com/godotengine/godot-docs/pull/3623), I'll put here a short description of what has changed.
### Annotations
We now have annotation support and that is used to replace a few of the keywords that were introduced along the way. This includes the RPC keywords, such as `@master`, `@puppet`, and `@remote`.
The previous `export` keyword is now replaced by a list of specialized annotations for each case. For example, if you want a range, you can use the `@export_range` annotation. There's no need to set the type as an argument anymore, since it can be retrieved from the variable's type specifier (e.g. `@export var my_number: int`).
This has an added benefit because those annotations feature code completion and hints for their arguments, which makes them much easier to use without having to remember all the possible combinations like before.
This also includes an `@icon` annotation that must be used instead of appending the icon path after `class_name` like before.
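As a rough sketch of how these annotations look together (the icon path and class setup are illustrative, and the exact syntax may still evolve):
```
@icon("res://player_icon.svg")
class_name Player
extends Node2D

# The type and hint are taken from the specifier, no extra arguments needed.
@export_range(0, 100) var health: int = 100

@remote
func sync_health(value: int) -> void:
	health = value
```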
### Properties
The previous `setget` syntax was removed in favor of [properties](https://github.com/godotengine/godot-proposals/issues/844). It is meant to be more tied to the variable declaration and avoids having to create dedicated functions (though you still can if you prefer). Another change is that, unlike `setget`, properties always call their setter/getter, even inside the same class. This gives users a consistent behavior which won't cause confusion or lead to potential mistakes during a refactor.
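For illustration, a minimal sketch of the new syntax (based on the linked proposal; the clamping behavior is just an example):
```
var health := 100:
	set(value):
		health = clamp(value, 0, 100)
	get:
		return health
```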
### `await` instead of `yield`
As you might already know, the `yield` keyword is removed in favor of `await`. It not only has a more meaningful name, but it also takes advantage of the first-class signals that were introduced in the `master` branch of Godot. The syntax is easier to understand and has less boilerplate (you don't need to write the `"completed"` signal when waiting for coroutines).
It is also more transparent if the function you called doesn't always work as a coroutine (you don't need to treat the special case), and it's type-safe since you can't receive a function state when you were expecting the return value.
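A small before/after sketch, assuming a hypothetical `fetch_data()` coroutine:
```
# Godot 3.x: waiting for a coroutine requires naming its "completed" signal.
var result = yield(fetch_data(), "completed")

# New GDScript: the same call, whether or not fetch_data() is a coroutine.
var result = await fetch_data()
```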
### `super` keyword
Instead of prefixing the function name with a period (`.`), you must now use the `super` keyword to call a function defined in the superclass instead of the overridden version. This also applies to constructors, making it more consistent in general and improving flexibility.
Calling `super()` on its own will call the same function you are in but using the superclass implementation. If you need to call a different function you can use `super.function_name()`.
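A short sketch (`take_damage()` and `update_health_bar()` are hypothetical):
```
func _ready():
	super()  # run the superclass's _ready() as well

func take_damage(amount):
	super.take_damage(amount)  # call the overridden implementation
	update_health_bar()
```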
## Code completion
As mentioned in the previous report, code completion was a work in progress. Now it is essentially complete. I am aware that it could use some quality-of-life improvements, but this will be worked on later. For now it should be the same as it was before. If something is missing, feel free to report an issue.
## Remove multi-level calls
Another common source of confusion was [removed](https://github.com/godotengine/godot/pull/40670). If you created an override of some lifecycle functions (such as `_process` or `_ready`), the engine would still call the superclass implementation implicitly. Worse yet: some functions called the superclass before the subclass, and some went in the other direction.
This behavior is now completely removed. If you need to call the parent implementation, you can use the `super` keyword as mentioned above. This is common in <abbr title="Object-Oriented Programming">OOP</abbr> languages, so it should be more aligned with what users expect (which is evidenced by multiple issues reporting this behavior in the past). It also gives users control of _when_ the super implementation should be called.
Note that methods defined in the C++ code are still called. This is needed to make sure engine behavior is correct (like the button `_gui_input` which is needed to execute the pressing behavior).
## Testing
I spent a good chunk of the time testing the GDScript implementation and fixed a number of bugs and crashes along the way. If you find crashes or issues, don't be afraid to [report them](https://github.com/godotengine/godot/issues) (if they weren't reported before). Eventually I'll do a big round of bug fixes to stabilize it for release, but for now I'll try to keep GDScript in a workable state and especially without crashes.
## Future
Currently I'm working on an abstraction for the code generation interface. This will be helpful to create the ability to change the target backend language (which currently is only the GDScript VM itself) without having to change the compiler, eventually paving the way for code optimization as well. More details on this will come in the next progress report.
After that I'll start adding typed instructions to the VM, which should increase the speed massively when you use static typing in your scripts.
{% include articles/download_card.html version="4.2" release="beta1" article=page %}
**Standard build** includes support for GDScript and GDExtension.
**.NET build** (marked as `mono`) includes support for C#, as well as GDScript and GDExtension.
- .NET build requires [.NET SDK 6.0](https://dotnet.microsoft.com/en-us/download/dotnet/6.0) or [7.0](https://dotnet.microsoft.com/en-us/download/dotnet/7.0) installed in a standard location.
- To export to Android, .NET 7.0 or later is required. To export to iOS, .NET 8.0 is required. Make sure to set the target framework in the `.csproj` file.
{% include articles/prerelease_notice.html %}
## Known issues
There are currently no known issues introduced by this release.
With every release we accept that there are going to be various issues, which have already been reported but haven't been fixed yet. See the GitHub issue tracker for a complete list of [known bugs](https://github.com/godotengine/godot/issues?q=is%3Aissue+is%3Aopen+label%3Abug+).
## Bug reports
As a tester, we encourage you to [open bug reports](https://github.com/godotengine/godot/issues) if you experience issues with this release. Please check the [existing issues on GitHub](https://github.com/godotengine/godot/issues) first, using the search function with relevant keywords, to ensure that the bug you experience is not already known.
In particular, any change that would cause a regression in your projects is very important to report (e.g. something that worked fine in previous 4.x releases no longer works in 4.2 beta 1).
## Support
Godot is a non-profit, open source game engine developed by hundreds of contributors on their free time, as well as a handful of part or full-time developers hired thanks to [generous donations from the Godot community](https://fund.godotengine.org/). A big thank you to everyone who has contributed [their time](https://github.com/godotengine/godot/blob/master/AUTHORS.md) or [their financial support](https://github.com/godotengine/godot/blob/master/DONORS.md) to the project!
If you'd like to support the project financially and help us secure our future hires, you can do so using the [Godot Development Fund](https://fund.godotengine.org/) platform managed by [Godot Foundation](https://godot.foundation/). There are also several [alternative ways to donate](/donate) which you may find more suitable.
---
title: "Web Editor beta, AudioWorklet, GDNative and more!"
excerpt: "The Web Editor reaches beta (3.2.4 beta 4), GDNative lands on the web, thread-enabled HTML5 builds now come with an improved audio driver using the AudioWorklet API."
categories: ["progress-report"]
author: Fabio Alessandrelli
image: /storage/app/uploads/public/5fe/1bf/c80/5fe1bfc80c475551734903.png
date: 2020-12-24 17:00:00
---
Howdy Godotters! The year is almost over and it's about time we give you some news about the Web Editor and the HTML5 export.
It's been a very fruitful year for Godot on the Web since the announcement of the first [web editor prototype](https://godotengine.org/article/godot-editor-running-web-browser).
As expected in the rationale of the original post, this has produced a lot of improvements to the HTML5 export in general, including support for threads, file drag and drop, <abbr title="Link-Time Optimization">LTO</abbr> for smaller and faster builds, low-processor mode with optional lower framerate, and better control over audio output latency and mix rate. See the [second](https://godotengine.org/article/godot-web-progress-report-2) and [third](https://godotengine.org/article/godot-web-progress-report-3) reports for more details.
With this new report, as you may know if you checked out the last [Godot Live Q&A](https://www.youtube.com/watch?v=zGmCbnE0UqA), we're happy to announce that we have added optional **GDNative support** in HTML5 exports, that the optional **Thread support** now comes with an improved audio driver using the **AudioWorklet API**, and that the **Web Editor has reached the beta stage**.
Optional GDNative/Threads support
=================================
If you tried out Godot [3.2.4 beta 4](https://godotengine.org/article/dev-snapshot-godot-3-2-4-beta-4) you might have noticed that the HTML5 export now has a new option called `Export Type`, which allows you to select either the `Regular`, `Threads`, or `GDNative` build.
Sadly, as of now, it is not possible to have an export that supports both GDNative and Threads. This is a [documented limitation](https://github.com/emscripten-core/emscripten/wiki/Linking#pthreads-support) of the toolchain used to create the web export, and of the WebAssembly specification itself. We are still investigating a workaround for that, but it's going to take some more time.

Each export type has advantages and disadvantages, so you should choose depending on the scope and target of your game:
- **Regular**: It is the most compatible across browsers but does not support multithreading or GDNative.
- **Threads**: Supports multithreading via the [Thread](https://docs.godotengine.org/en/stable/classes/class_thread.html) and [Mutex](https://docs.godotengine.org/en/stable/classes/class_mutex.html) classes and comes with a low latency audio driver that runs off the main thread preventing it from stalling or crackling when framerate drops or when changing scenes. However, it is currently not supported by all browsers (notably, Safari and thus iOS does not support it yet). It also requires some [extra care when distributing](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer#Security_requirements).
- **GDNative**: Supports GDNative libraries, allowing you to write your code in other languages (e.g. C++) so you can further optimize your game and speed up execution, or bind external libraries to add extra functionality to the engine. On the downside, it will result in a bigger build size, thus increasing the startup time and memory usage.
In general, my suggestion if you are not interested in mobile browser support is to use the *Threads* build to get the best performance with the smallest effort.
Hopefully, Safari's support for WebAssembly threads will ship next year thanks to the great work people are doing on the WebKit project, and even developers interested in HTML5 exports for mobile will be able to use the threaded version.
Web Editor beta
===============
A new version of the web editor has been published at [this temporary address](https://godotengine.org/editor/).
This is the first release tagged as "official" and based on the exact same sources of the other [3.2.4 beta 4](https://godotengine.org/article/dev-snapshot-godot-3-2-4-beta-4) builds. The goal is to keep the editor in sync with stable builds (starting from 3.2.4 stable). Old versions will still be available at dedicated locations.

The highlights of the new version are:
- A new improved look and feel of the HTML page that looks more integrated with the editor (special thanks to [Calinou](https://github.com/Calinou)).
- All importing issues should now be fixed. Most notably, audio samples can now be imported correctly, so the demo project now contains audio too.
- An old bug in the third-party Tween functions that caused weird behaviors on HTML5 exports (and potentially other platforms too) has been fixed.
- A few bugs have been fixed in the JavaScript code that interfaces HTML5 exports with the browser (that glue code has been mostly rewritten).
Honorable mentions
==================
I would like to give a shout-out to the amazing work done by [dsnopek](https://github.com/dsnopek) and [lawnjelly](https://github.com/lawnjelly) on [WebXR](https://github.com/godotengine/godot/pull/42397) and [GLES/WebGL in Godot 4.0](https://twitter.com/lawnjelly/status/1336767514227957761) respectively. You well deserve the Web export medal of honor.
Future work
===========
We feel like the web editor is almost ready to become stable, and plan to release it as such when 3.2.4 is released, but there are still a few things that need to be improved for web exports before I can refocus my work on some other ideas I've been sketching for networking and server exports:
The HTML5-related **documentation** needs some love to be more in sync with the current status.
**Gamepad** support on HTML5 is in an abysmal state, for many reasons. First of all, the W3C specification is incomplete and does not make it easy to identify devices in a unique way. Additionally, the sheer amount of work required to support all possible devices is too much for browser vendors.
In an effort to improve the current situation, we plan to create a small addon, which you can bundle in your game, that allows remapping the gamepad on the fly, or send your generated mappings to us (so we can bundle them in future Godot versions). While this won't solve all the problems until the specification is improved and browser support gets better, it should allow many more controllers to work out of the box across different platforms.
I've recently proposed a better way than `eval` to **communicate with JavaScript**, and that will need a tentative implementation.
Stay tuned for the next announcement!
References
==========
[GDNative support](https://github.com/godotengine/godot/pull/44076) [(3.2)](https://github.com/godotengine/godot/pull/44170)
[Threads/AudioWorklet](https://github.com/godotengine/godot/pull/43443) [(3.2)](https://github.com/godotengine/godot/pull/43454)
[Importing issues](https://github.com/godotengine/godot/pull/44161)
[Tween bug](https://github.com/godotengine/godot/pull/44197)
[Editor style](https://github.com/godotengine/godot/pull/44221) [(3.2)](https://github.com/godotengine/godot/pull/44256)
| |
140206
|
---
title: "Basic cryptography, SSL improvements"
excerpt: "As part of the MOSS project sponsored by Mozilla, during July I worked on some new features regarding cryptography and SSL to improve the quality and security of Godot networking."
categories: ["progress-report"]
author: Fabio Alessandrelli
image: /storage/app/uploads/public/5d8/f4b/b0a/5d8f4bb0a3174096196968.png
date: 2019-09-28 12:15:00
---
As part of the MOSS project sponsored by Mozilla, during July I worked on some new features regarding cryptography and SSL to improve the quality and security of Godot networking.
Certificates and keys as resources
----------------------------------
SSL certificates (in the form of `*.crt` files) and private keys (in the form of `*.key` files) are now treated as resources in Godot. This means they will be exported automatically and they can be loaded via the GDScript `load()` function.
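For example (paths are illustrative), once the files are part of the project they load like any other resource:
```gdscript
# Hypothetical paths; any *.key / *.crt file in the project works the same way.
var key = load("res://server.key")   # Loaded as a CryptoKey resource.
var cert = load("res://server.crt")  # Loaded as an X509Certificate resource.
```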
Crypto class
------------
A new [`Crypto`](http://docs.godotengine.org/en/latest/classes/class_crypto.html) class was introduced that provides access to some basic cryptographic functions; a short sketch of its usage follows the list below.
- You can generate cryptographically secure random bytes via the `generate_random_bytes()` function. The bytes are returned in a `PoolByteArray`.
- You can generate RSA keys that can be used by `StreamPeerSSL` to act as a server.
- You can generate SSL self-signed certificates that again, can be used by `StreamPeerSSL` to act as a server.
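Here is a minimal sketch of the three functions above (the key size and issuer string are just examples):
```gdscript
var crypto = Crypto.new()
# 32 cryptographically secure random bytes, returned as a PoolByteArray.
var token = crypto.generate_random_bytes(32)
# A 2048-bit RSA key and a matching self-signed certificate.
var key = crypto.generate_rsa(2048)
var cert = crypto.generate_self_signed_certificate(key, "CN=localhost,O=Test,C=IT")
```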
Hashing Context
---------------
A new [`HashingContext`](http://docs.godotengine.org/en/latest/classes/class_hashingcontext.html) class now provides an interface for computing cryptographic hashes (MD5, SHA-1, SHA-256) over multiple iterations.
This is useful, for example, when computing hashes of big files (so you don't have to load them entirely in memory), network streams, and data streams in general (so you don't have to hold buffers). Here is an example of how it works:
```
const CHUNK_SIZE = 1024

func hash_file(path):
    var ctx = HashingContext.new()
    var file = File.new()
    # Start a SHA-256 context.
    ctx.start(HashingContext.HASH_SHA256)
    # Check that file exists.
    if not file.file_exists(path):
        return
    # Open the file to hash.
    file.open(path, File.READ)
    # Update the context after reading each chunk.
    while not file.eof_reached():
        ctx.update(file.get_buffer(CHUNK_SIZE))
    # Get the computed hash.
    var res = ctx.finish()
    # Print the result as hex string and array.
    printt(res.hex_encode(), Array(res))
```
SSL improvements
----------------
[`StreamPeerSSL`](http://docs.godotengine.org/en/latest/classes/class_streampeerssl.html) can now use a per-object SSL certificate (i.e. you no longer have to set the trusted certificates in the project settings); you can specify the valid certificate by passing an `X509Certificate` as the last parameter of `connect_to_stream()`.
`StreamPeerSSL` can now act as a server. The new `accept_stream()` function, which accepts a private key, a certificate, and an optional CA chain, will try to establish a connection with the given stream acting as a server. This will soon also allow us to support acting as a WebSocket server over TLS.
Here is an example of a test HTTPS server made in GDScript... not meant to be used in production ;-)
```
extends Node

# A class that represents a client accepted by our server.
class Client extends Reference:
    # The SSL stream of this client.
    var ssl = StreamPeerSSL.new()
    # Received request.
    var recv = ""

    # Set the stream for this client.
    func set_stream(stream, key, cert):
        ssl.blocking_handshake = false
        ssl.accept_stream(stream, key, cert)

    # Process network operations for this client.
    func process():
        if ssl.get_status() == StreamPeerSSL.STATUS_HANDSHAKING:
            # Still performing handshake.
            ssl.poll()
            return
        if ssl.get_status() != StreamPeerSSL.STATUS_CONNECTED:
            # Disconnected.
            return
        ssl.poll()
        # Read available bytes.
        if ssl.get_available_bytes() > 0:
            recv += ssl.get_data(ssl.get_available_bytes())[1].get_string_from_utf8()
        # Send response if request is complete.
        if recv.ends_with("\r\n\r\n"):
            ssl.put_data(("HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n" + \
                    "<h2>Godot TLS Test Server</h2>\r\n" + \
                    "<p>Successful connection using SSL</p>\r\n").to_utf8())
            ssl.disconnect_from_stream()

    func is_disconnected():
        return ssl.get_status() != StreamPeerSSL.STATUS_HANDSHAKING and \
                ssl.get_status() != StreamPeerSSL.STATUS_CONNECTED

# Our TCP server.
var _server = TCP_Server.new()
# A list of connected clients.
var _clients = []
# Our private key and certificate.
var _key = null
var _cert = null

func _ready():
    var crypto = Crypto.new()
    # Generate an RSA key (this should be done in a thread to avoid blocking).
    _key = crypto.generate_rsa(4096)
    # Generate a self-signed certificate to use with our server.
    _cert = crypto.generate_self_signed_certificate(_key, "CN=example.com,O=A Game Company,C=IT")
    # Start listening on "*:4343".
    _server.listen(4343)

func _process(delta):
    # Take new connections.
    if _server.is_connection_available():
        var c = Client.new()
        c.set_stream(_server.take_connection(), _key, _cert)
        _clients.append(c)
    # Take note of disconnected clients.
    var to_rem = []
    # Process clients and send response when done.
    for c in _clients:
        c.process()
        if c.is_disconnected():
            to_rem.append(c)
    # Remove disconnected clients.
    for c in to_rem:
        _clients.erase(c)
```
Future work
-----------
This has been quite a long effort, and it included some refactoring of the `core` code to use a single library for cryptography (mbedTLS) instead of multiple specific libraries for hashing algorithms and AES. This work will allow us to introduce support for AES encryption and more at the scripting level in future versions.
Additionally, the SSL overhaul helped a lot in developing the upcoming DTLS implementation.
References
----------
- [PR refactoring the `core` crypto code](https://github.com/godotengine/godot/pull/30239)
- [PR adding Crypto class, Hashing contexts, SSL server](https://github.com/godotengine/godot/pull/29871)
- [PR adding documentation for above-mentioned additions](https://github.com/godotengine/godot/pull/32285)
| |
140305
|
# Downloads
{% include articles/download_card.html version="4.1.4" release="rc3" article=page %}
**Standard build** includes support for GDScript and GDExtension.
**.NET 6 build** (marked as `mono`) includes support for C#, as well as GDScript and GDExtension.
- .NET build requires [.NET SDK 6.0](https://dotnet.microsoft.com/en-us/download/dotnet/6.0) or [7.0](https://dotnet.microsoft.com/en-us/download/dotnet/7.0) installed in a standard location.
{% include articles/download_card.html version="4.2.2" release="rc3" article=page %}
**Standard build** includes support for GDScript and GDExtension.
**.NET build** (marked as `mono`) includes support for C#, as well as GDScript and GDExtension.
- .NET build requires [.NET SDK 6.0](https://dotnet.microsoft.com/en-us/download/dotnet/6.0), [7.0](https://dotnet.microsoft.com/en-us/download/dotnet/7.0), or [8.0](https://dotnet.microsoft.com/en-us/download/dotnet/8.0) installed in a standard location.
- To export to Android, .NET 7.0 or later is required. To export to iOS, .NET 8.0 is required. Make sure to set the target framework in the `.csproj` file.
{% include articles/prerelease_notice.html %}
## Known issues
There are currently no known issues introduced by these releases.
With every release, we accept that there are going to be various issues which have already been reported but haven't been fixed yet. See the GitHub issue tracker for a complete list of [known bugs](https://github.com/godotengine/godot/issues?q=is%3Aissue+is%3Aopen+label%3Abug+).
## Bug reports
We encourage you, as testers, to [open bug reports](https://github.com/godotengine/godot/issues) if you experience issues with this release. Please check the [existing issues on GitHub](https://github.com/godotengine/godot/issues) first, using the search function with relevant keywords, to ensure that the bug you experience is not already known.
In particular, any change that would cause a regression in your projects is very important to report (e.g. if something that worked fine in previous 4.x releases no longer works).
## Support
Godot is a non-profit, open source game engine developed by hundreds of contributors in their free time, as well as a handful of part- or full-time developers hired thanks to [generous donations from the Godot community](https://fund.godotengine.org/). A big thank you to everyone who has contributed [their time](https://github.com/godotengine/godot/blob/master/AUTHORS.md) or [their financial support](https://github.com/godotengine/godot/blob/master/DONORS.md) to the project!
If you'd like to support the project financially and help us secure our future hires, you can do so using the [Godot Development Fund](https://fund.godotengine.org/) platform managed by [Godot Foundation](https://godot.foundation/). There are also several [alternative ways to donate](/donate) which you may find more suitable.
| |
140318
|
## Solution
### Preparation
Solving the problems identified above does not actually require the introduction of an acyclic graph: inserting synchronization barriers and performing dependency tracking is entirely possible without applying this technique. This was actually debated internally for a while, but it was determined that if the engine was able to reorder commands, it'd allow for grouping them more effectively between the mandatory synchronization points and would result in better performance.
However, being able to reorder commands meant that an intermediate step had to be introduced where commands were recorded into an auxiliary structure that could be reordered and then converted to the corresponding native API commands. One possibility was encoding the Vulkan command arguments into the auxiliary buffer, but that approach meant the entire graph structure and logic would need to be implemented for every other backend as well. Therefore, it was deemed necessary to build on Pedro's [pull request](https://github.com/godotengine/godot/pull/83452) that introduces an abstract interface for all the supported graphics APIs, including Vulkan, D3D12 and, in the near future, Metal. Thanks to this change, it was possible to use a single abstract API to encode commands into the auxiliary buffer.
The initial redesign was laid out in *reduz*'s [draft](https://gist.github.com/reduz/980b9b2547d57e6a915b2bb7e1e76e08), which was largely inspired by [Pavlo Muratov's "Organizing GPU Work with Directed Acyclic Graphs"](https://levelup.gitconnected.com/organizing-gpu-work-with-directed-acyclic-graphs-f3fd5f2c2af3) and showed how the concept could be applied to Godot's existing design. Not everything stated in the document made it into the final version: in fact, the changes to RenderingDevice were much less severe than initially indicated, and the interface remained largely compatible. While the article that served as inspiration includes a very detailed algorithm on how to implement multi-queue submission using the graph, the team decided to cut this idea short and stick to a single command queue to begin with, as the real difficulty of the task lies in building the graph automatically, and that alone would take a significant amount of development time.
### Acyclic graph
The unique aspect of the implemented graph is that its construction is completely invisible to the programmer using RenderingDevice. Commands requested from the class are logged internally and each command maintains an adjacency list that is updated as new dependencies are detected. Since these adjacencies only work one way and older commands cannot depend on future commands, it is virtually impossible for cyclic dependencies to form (hence the "acyclic" part of the graph). While a graph can be constructed in many ways, a list of vertices and an adjacency list are sufficient. Render commands play the role of vertices, and commands store the indices of their adjacent commands.
|  |
|:--:|
| *The rendering operations of a frame and their dependencies represented as a graph.* |
An important decision that was made to allow this structure to scale more effectively is that each instance of a draw list or a compute list is considered **one node in the graph**. There is no benefit to allowing reordering within these structures, and Godot already has a clear concept of what these lists are used for. Games often draw a lot of geometry, but they don't create tons of render passes per frame, as that doesn't result in efficient use of the GPU. To put it in numbers, one of the benchmark scenes used during testing could easily reach hundreds of thousands of nodes if each individual command was recorded into the graph. Correlating entire render passes to individual nodes instead brought this number down to about **300 nodes per frame**. Operating with a graph of this scale was a very good sign that the performance overhead would be very small.
Once all commands for the frame have been recorded, a [topological sort](https://en.wikipedia.org/wiki/Topological_sorting) is performed on the graph to get an ordered list of commands. During this step the "levels" of the commands are detected to determine how they can be grouped and where synchronization points (barriers) should be introduced. All commands belonging to a particular level in the graph can be executed in parallel as no dependencies have been detected between them, meaning that no barriers are required until the next level is reached. This sorting step is where the magic behind the performance gains happens.
|  |
|:--:|
| *After sorting, all commands that belong to the same level can be executed in any order, resulting in multiple possible command sequences.* |
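To make the level assignment concrete, here is a rough sketch in GDScript (the real implementation is C++ inside RenderingDevice, and all names here are made up). Because adjacencies only point from older to newer commands, a single forward pass is enough:
```gdscript
# adjacency[i] holds the indices of the commands that depend on command i.
func compute_levels(adjacency: Array) -> Array:
    var level := []
    level.resize(adjacency.size())
    level.fill(0)
    # A dependent command sits one level deeper than its deepest dependency.
    for i in adjacency.size():
        for adj in adjacency[i]:
            level[adj] = max(level[adj], level[i] + 1)
    # Group command indices by level; barriers only go between groups.
    var groups := []
    for i in adjacency.size():
        while groups.size() <= level[i]:
            groups.append([])
        groups[level[i]].append(i)
    return groups
```
For instance, `compute_levels([[1, 2], [3], [3], []])` returns `[[0], [1, 2], [3]]`: commands 1 and 2 share a level, so they can run in either order with no barrier between them.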
One important detail that resulted in frametime reductions during this step was to take into account the type of command as a sorting factor: grouping together operations based on whether they were related to copying data, drawing or compute dispatches provided some noticeable increases in performance. While the exact reason behind this has not been determined, it seems likely that GPUs prefer to change the type of pipeline they need to use within a command buffer as little as possible.
While the concept of using a structure like a graph and using sorting algorithms might sound like the most daunting part of the task due to the level of abstraction involved, it is the dependency tracking and adjacency list detection during command recording where most problems arise. The relationships shown in the diagrams above were not specified by the programmer using RenderingDevice: they must be detected automatically based on how the resources were used in the frame, and this turned out to be no small task due to some particular details of how Godot works.
### Resource tracking
The resources used by RenderingDevice in Godot are buffers or textures. While these are separate objects at the lower level depending on the API being used, the graph considers them both as one to share much of the logic during implementation. However, a distinction will be made later when texture slices are introduced, which is something Godot uses quite a bit in various parts of its rendering code. Textures also have the additional requirement that they need to make layout transitions to be ready for use in different commands, while buffers don't need to do this at all.
Whenever a resource is created, a new "tracker" structure is introduced to store the information relevant to the graph construction during command recording. The tracker holds references to which commands are writing or reading from the resource and modifies these lists accordingly as more commands are recorded. It also stores a "usage" variable that indicates what the current use of the resource is at the time of recording. Usages are classified as either "read" or "read-write" operations, and which one applies has strong implications for how dependencies between commands will be detected. For example, a command that reads from Resource A can be executed in parallel with another command that reads from Resource A, but that will not be valid if the other command can write to Resource A. In this case, a dependency is inserted between the two commands to ensure that the first command can finish reading the resource correctly before the next command modifies it.
|  |
|:--:|
| *The tracker holds the current usage of a resource and determines whether it is necessary to perform a transition based on the type of command that references it.* |
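The following sketch illustrates these hazard rules (again in GDScript for readability, with hypothetical names): a read only needs to wait for the last writer, while a write must wait for every pending reader, or for the previous writer if nothing read in between:
```gdscript
class ResourceTracker:
    var readers: Array[int] = []  # Commands reading since the last write.
    var last_writer: int = -1     # Most recent command that wrote.

func record_usage(cmd: int, tracker: ResourceTracker, writes: bool, adjacency: Array) -> void:
    if writes:
        # Write-after-read: wait for all pending readers.
        for r in tracker.readers:
            adjacency[r].append(cmd)
        # Write-after-write: wait for the previous writer if nothing read since.
        if tracker.readers.is_empty() and tracker.last_writer != -1:
            adjacency[tracker.last_writer].append(cmd)
        tracker.readers.clear()
        tracker.last_writer = cmd
    else:
        # Read-after-write: reads can share a level, but wait for the last writer.
        if tracker.last_writer != -1:
            adjacency[tracker.last_writer].append(cmd)
        tracker.readers.append(cmd)
```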
Textures also have a particular requirement: changing the usage implies a memory layout transition even if it's just for read-only operations. For example, there are different layouts for a texture being used as the source of a copy operation and for a texture being used for sampling in a shader. While this distinction might not necessarily be true at the hardware level, it is actually possible to witness texture corruption if these transitions are not performed correctly, depending on the GPU's architecture (AMD is really good for testing these out!). Therefore, any change in usage when textures are involved is usually considered a write operation, as most of them require a particular layout. This introduces some dependencies between commands that might not be very obvious but are strictly required for the operations to work correctly: continuing with the previous example, it's not possible to use the optimal memory layout for copying a texture and sampling it in a shader in parallel, even if both are read-only operations.
##
| |
140323
|
# How to make a custom SkeletonModifier3D?
`SkeletonModifier3D` is a virtual class, so you can't add it as a standalone node to a scene.

Then, how do we create a custom `SkeletonModifier3D`? Let's try to create a simple custom `SkeletonModifier3D` that points the Y-axis of a bone to a specific coordinate.
## 1. Create a script
Create a blank GDScript file that extends `SkeletonModifier3D`. At this point, register your custom `SkeletonModifier3D` with a `class_name` declaration so that it can be added from the scene dock.
```gdscript
class_name CustomModifier
extends SkeletonModifier3D
```

## 2. Add some declarations and properties
If necessary, add a property to set the bone by declaring `@export_enum` and set the `Skeleton3D` bone names as a hint in `_validate_property()`. You also need to declare `@tool` if you want to select it in the editor.
```gdscript
@tool
class_name CustomModifier
extends SkeletonModifier3D

@export var target_coordinate: Vector3 = Vector3(0, 0, 0)
@export_enum(" ") var bone: String

func _validate_property(property: Dictionary) -> void:
    if property.name == "bone":
        var skeleton: Skeleton3D = get_skeleton()
        if skeleton:
            property.hint = PROPERTY_HINT_ENUM
            property.hint_string = skeleton.get_concatenated_bone_names()
```
The `@tool` declaration is also required for previewing the modifications made by a `SkeletonModifier3D`, so you can consider it effectively required.

## 3. Coding calculations of the modification in `_process_modification()`
```gdscript
@tool
class_name CustomModifier
extends SkeletonModifier3D

@export var target_coordinate: Vector3 = Vector3(0, 0, 0)
@export_enum(" ") var bone: String

func _validate_property(property: Dictionary) -> void:
    if property.name == "bone":
        var skeleton: Skeleton3D = get_skeleton()
        if skeleton:
            property.hint = PROPERTY_HINT_ENUM
            property.hint_string = skeleton.get_concatenated_bone_names()

func _process_modification() -> void:
    var skeleton: Skeleton3D = get_skeleton()
    if !skeleton:
        return # Should never happen, but check for safety.
    var bone_idx: int = skeleton.find_bone(bone)
    var parent_idx: int = skeleton.get_bone_parent(bone_idx)
    var pose: Transform3D = skeleton.global_transform * skeleton.get_bone_global_pose(bone_idx)
    var looked_at: Transform3D = _y_look_at(pose, target_coordinate)
    skeleton.set_bone_global_pose(bone_idx, Transform3D(looked_at.basis.orthonormalized(), skeleton.get_bone_global_pose(bone_idx).origin))

func _y_look_at(from: Transform3D, target: Vector3) -> Transform3D:
    var t_v: Vector3 = target - from.origin
    var v_y: Vector3 = t_v.normalized()
    var v_z: Vector3 = from.basis.x.cross(v_y)
    v_z = v_z.normalized()
    var v_x: Vector3 = v_y.cross(v_z)
    from.basis = Basis(v_x, v_y, v_z)
    return from
```
`_process_modification()` is a virtual method called in the update process after the AnimationMixer has been applied, as described in the sequence diagram above. If you modify bones in it, it is guaranteed that the order in which the modifications are applied will match the order of the `SkeletonModifier3D`s in the `Skeleton3D`'s child list.
<video autoplay loop muted playsinline>
<source src="/storage/blog/design-of-the-skeleton-modifier-3d/custom_modifier.webm?1" type="video/webm">
</video>
Note that the modification should always be applied to the bones at 100% strength, because `SkeletonModifier3D` has an `influence` property whose value is processed and interpolated by `Skeleton3D`. In other words, you do not need to write code to change the amount of modification applied; you should avoid implementing duplicate interpolation processes. However, if your custom `SkeletonModifier3D` can specify multiple bones and you want to manage the amount separately for each bone, it makes sense to add per-bone amount properties to your custom modifier.
Finally, remember that this method will not be called if the parent is not a `Skeleton3D`.
## 4. Retrieve modified values from other Nodes
The modification by `SkeletonModifier3D` is immediately discarded after it is applied to the skin, so it is not reflected in the bone pose of `Skeleton3D` during `_process()`.
If you need to retrieve the modified pose values from other nodes, you must connect them to the appropriate signals.
For example, this is a `Label3D` which reflects the modification after the animation is applied and after all modifications are processed.
```gdscript
@tool
extends Label3D

@onready var poses: Dictionary = { "animated_pose": "", "modified_pose": "" }

func _update_text() -> void:
    text = "animated_pose:" + str(poses["animated_pose"]) + "\n" + "modified_pose:" + str(poses["modified_pose"])

func _on_animation_player_mixer_applied() -> void:
    poses["animated_pose"] = $"../Armature/Skeleton3D".get_bone_pose(1)
    _update_text()

func _on_skeleton_3d_skeleton_updated() -> void:
    poses["modified_pose"] = $"../Armature/Skeleton3D".get_bone_pose(1)
    _update_text()
```
You can see the pose is different depending on the signal.

### Download
[skeleton-modifier-3d-demo-project.zip](/storage/blog/design-of-the-skeleton-modifier-3d/skeleton-modifier-3d-demo-project.zip)
# Do I always need to create a custom SkeletonModifier3D when modifying a Skeleton3D bone?
As explained above, the modification provided by `SkeletonModifier3D` is temporary. So `SkeletonModifier3D` would be appropriate for effectors and controllers as **post FX**.
If you want permanent modifications, i.e., if you want to develop something like a bone editor, then it makes sense for it not to be a `SkeletonModifier3D`. Also, in simple cases where it is guaranteed that no other `SkeletonModifier3D` will be used in the scene, use your own judgment.
# What kind of SkeletonModifier3D nodes are included in Godot 4.3?
For now, Godot 4.3 will contain only `SkeletonModifier3D` nodes that are migrations of several existing nodes which have been around since 4.0.
But there is good news! We are planning to add some built-in `SkeletonModifier3D`s in Godot 4.4, such as new IK, constraint, and springbone/jiggle nodes.
If you are interested in developing your own effect using `SkeletonModifier3D`, feel free to make a proposal to include it in core.
## Support
Godot is a non-profit, open source game engine developed by hundreds of contributors in their free time, as well as a handful of part- or full-time developers hired thanks to [generous donations from the Godot community](https://fund.godotengine.org/). A big thank you to everyone who has contributed [their time](https://github.com/godotengine/godot/blob/master/AUTHORS.md) or [their financial support](https://github.com/godotengine/godot/blob/master/DONORS.md) to the project!
If you'd like to support the project financially and help us secure our future hires, you can do so using the [Godot Development Fund](https://fund.godotengine.org/) platform managed by [Godot Foundation](https://godot.foundation/). There are also several [alternative ways to donate](/donate) which you may find more suitable.
| |
140351
|
---
title: "Multiplayer in Godot 4.0: Scene Replication (part 1)"
excerpt: "The long-awaited first post about the multiplayer replication system in development for Godot 4.0 is here!
Check out the design goals, concepts, initial prototype, and as always, stay tuned for more!"
categories: ["progress-report"]
author: Fabio Alessandrelli
image: /storage/app/uploads/public/61a/27f/881/61a27f8816a4e934017559.png
date: 2021-11-27 19:00:00
---
Howdy Godotters!
It's finally time for the long-awaited post about the new multiplayer replication system that is being developed for Godot 4.0.
Below, we will introduce the concepts around which it was designed, the currently implemented prototype, and planned changes to make it more powerful and user-friendly.
*See other articles in this Godot 4.0 networking series:*
1. [Multiplayer in Godot 4.0: On servers, RSETs and state updates](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-1)
2. [Multiplayer in Godot 4.0: RPC syntax, channels, ordering](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-2)
3. [Multiplayer in Godot 4.0: ENet wrappers, WebRTC](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-3)
4. (you are here) [Multiplayer in Godot 4.0: Scene Replication (part 1)](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-4)
## Design goals
Making multiplayer games has historically been a complex task, requiring ad-hoc optimizations and game-specific solutions. Still, two main concepts are almost ubiquitous in multiplayer games: some form of **messaging**, and some form of **state replication** (synchronization and reconciliation).
While Godot does provide a system for messaging (i.e. <abbr title="Remote Procedure Calls">RPC</abbr>), it does not provide a common system for replication.
In this sense, we had quite a few [#networking meetings](https://chat.godotengine.org/) in August 2021 to design a replication API that could be used for the common cases, while being extensible via plugins or custom code.
The design goals that emerged for such an API were:
- Provide an out-of-the-box solution for scene state replication across the network.
- Allow for (almost) no-code prototyping.
- Be extensible with game-specific behaviours (custom reconciliation, interpolation, interest management, etc).
- Allow ex-post (incremental) optimizations of network code.
- Be easy to use for game developers, of course :)
### Glossary
- `State`: The information (properties) about an Object relevant to the multiplayer game.
- `Spawn`: Creating, or remotely requesting the creation of, a new Object.
- `Sync`: Updating, or remotely requesting an update of, the state of an Object.
### Security
When dealing with computer networks, it's important to understand the security implications of transferring data across machines.
For instance, Godot does not allow [decoding objects](https://docs.godotengine.org/en/stable/classes/class_multiplayerapi.html#class-multiplayerapi-property-allow-object-decoding) by default, since they could carry scripts with them or force the receiving end to execute specific code during initialization. This is a security vulnerability, as arbitrary code execution of this kind would allow servers to access or manipulate any file on the client's filesystem that the game process has access to.
In a similar way, the replication API will let you specify which scenes can be spawned by a remote peer. The final implementation will also allow fine-grained control over which node can be spawned at each specific path.
### Optimizations
Optimizations, and bandwidth optimizations in particular, are crucial to an effective networking protocol.
- Synchronizing multiple properties is very useful in the prototyping stage, but bad in terms of potential optimizations.
- A very quick way to optimize the network code later on is to replicate a single property that returns a tightly packed representation of the object state, based on your game's unique characteristics (see the sketch after this list).
When done properly, this is also going to be the most optimized state possible, which no generic tool can produce for you.
- The replication API will still try to squeeze the state size as much as possible with the information in its hands.
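As a sketch of what such a hand-packed property could look like (in current Godot 4 syntax; the `packed_state` property, the `Node2D` base and the `health` field are all made up for illustration):
```gdscript
extends Node2D

var health: int = 100

# A single synchronized property packing position and health into 9 bytes.
var packed_state: PackedByteArray:
    get:
        var buf := StreamPeerBuffer.new()
        buf.put_float(position.x)
        buf.put_float(position.y)
        buf.put_u8(health)
        return buf.data_array
    set(value):
        var buf := StreamPeerBuffer.new()
        buf.data_array = value
        position = Vector2(buf.get_float(), buf.get_float())
        health = buf.get_u8()
```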
| |
140377
|
---
title: "Godot Engine reaches 2.0 stable"
excerpt: "Godot 2.0 is out! This release is special because our team has grown a lot. We have more regular contributors, a documentation team, a bug triage team and a much larger community! Godot keeps growing and becoming more and more awesome."
categories: ["release"]
author: Juan Linietsky
image: /storage/app/uploads/public/56c/bd3/9db/56cbd39dbdddc275301297.png
date: 2016-02-23 00:00:00
---
## Godot 2.0
A little more than two years ago, Godot was open sourced. It was meant to be an in-house tool and, while it worked for use in internal projects, it was far from the usability expected when you have thousands of developers working with it.
After a year of hard work and community feedback, Godot 1.0 was released, marking the first version that was ready for general consumption. This version worked well but we felt it was still far from the usability and features of a modern game engine. The more urgent issue was to improve the 2D engine so we worked hard again and released Godot 1.1, which did in fact improve 2D rendering considerably.
Usability still remained a pressing issue, so we made a long list of tasks to improve upon for 2.0. We worked hard and after about 8 months we now finally have a stable Godot ready for you!
This release is special because our team has grown a lot. We have more regular contributors, a documentation team, a bug triage team and a much larger community! Godot keeps growing and becoming more and more awesome.
[See the full list of changes](http://downloads.tuxfamily.org/godotengine/2.0/Godot_v2.0_stable_changelog.txt).
## New core features
While for 2.0 core changes were not a priority, there are some nice improvements on this release!
#### Improved scene instancing
Instancing is one of Godot's best features. For this version it has been further improved. Previously, only the root node of a scene was editable. Changes to sub-nodes would result in data loss.
It is now possible to edit any child node of the instanced scene and have the changes persist. Even sub-instances of instances can be edited with persistent modifications.

#### Scene inheritance
Beginning with Godot 2.0, scenes can not only be instanced but also inherited. This allows many interesting use cases, such as:
* Having a base scene (i.e. enemy, collectable, obstacle, etc.) with a node hierarchy common to all (like explosions, shines, indicators, etc.), and then extending it for each class.
* Making non-destructive changes to an imported scene (i.e. a 3D scene, etc.).
* Making non-destructive changes to a scene created by another team member.

#### New text-based scene format
Godot supports the XML format for saving scenes and resources, but we had problems with it:
* Scenes are saved packed in XML, so the content is incomprehensible.
* The format is not efficient to parse.
* It is not friendly to VCS (Git, SVN).
* XML is not easy to write manually, and it's easy to make mistakes.
With this in mind, Godot 2.0 adds a new text file format inspired by [TOML](https://github.com/toml-lang/toml). This new format is descriptive (human-friendly) when saving scenes, and Git/SVN friendly:

For 3.0, this format will be the only text based format supported, and XML will be deprecated.
#### ``onready`` & singletons
Initializing class member variables can be quite a hassle; code such as this is common in GDScript:

The `onready` keyword allows initializing class member variables at the time `_ready` is called, saving some code:

But doing this for autoloaded scenes can still be a hassle, as it has to be done every time a script is created. To ease this, it's possible to make an autoloaded scene or script a singleton variable (accessible at global scope) in the project settings:

All this, of course, working together perfectly with code completion.
#### Other new core features
Smaller new core features were also added:
* Support for ZIP packs on export instead of PCK.
* Support for the Opus audio format.
* Changed to a more compatible JPG decoder.
* Greatly improved gamepad support.
| |
140412
|
## Scripting
<a id="gdscript"></a>
### GDScript
With **GDScript** being the most used language among current Godot users, we wanted to really improve the coding experience in Godot 4 with some of the most requested language features.
You will find the static typing system is now more robust, with no cyclic dependency errors and the ability to type arrays. You can reap the benefits of first-class functions, lambdas and signals, a new property syntax, the `await` and `super` keywords, and functional tools like `map` or `reduce`.

With less use of strings, your code can be much more reliable. New built-in annotations make the language clearer and improve the syntax for exported properties. You can name variables and functions using Unicode characters, making code easier to write and read for developers who rely on non-Latin alphabets.
Error reporting improved considerably with the compiler's ability to report many errors simultaneously, using more explicit error messages and new warnings for common mistakes.

To top it off, your scripts can now automatically generate documentation in the built-in help and the Inspector dock tooltips. This practical and time-saving feature was implemented by a student, Thakee Nathees ([ThakeeNathees](https://github.com/ThakeeNathees)), during the 2020 Google Summer of Code. You can read their report [here](/article/gsoc-2020-progress-report-1#gdscript-doc).
Despite growing in features, the GDScript runtime is much faster and more stable in Godot 4. This was achieved by a complete rewrite of the language backend by our main scripting maintainer George Marques ([vnen](https://github.com/vnen)). If you are interested in further reading, George provided several detailed reports on the new language features ([1](/article/gdscript-progress-report-new-gdscript-now-merged), [2](/article/gdscript-progress-report-feature-complete-40)) and on the decision-making process for the new language parser and runtime ([1](/article/gdscript-progress-report-writing-tokenizer), [2](/article/gdscript-progress-report-writing-new-parser), [3](/article/gdscript-progress-report-type-checking-back), [4](/article/gdscript-progress-report-typed-instructions)).
<a id="c"></a>
### C\#
The much-anticipated port to .NET 6 now allows users to target a newer framework that brings optimizations and new APIs. With .NET 6, projects use C# 10 by default and all features are available.
Of course, the 4.0 release is also a great opportunity to break compatibility and improve the API. If you’re a C# user we highly recommend checking out this [blog post by Raul Santos](/article/whats-new-in-csharp-for-godot-4-0/), to find out all that’s new in C# for Godot 4.0.
One of the most notable changes is the use of 64-bit types as scalar values. This means many APIs that used `int` or `float` now use `long` and `double`, the most noticeable being the `_Process` method. A `Variant` type now also replaces `System.Object` in every API that takes variants. This brings some improvements, such as avoiding boxing the values.
Another change worth mentioning is the ability to declare signals as C# events. Declaring signals is still done by writing a delegate with the `[Signal]` attribute, but now the delegate name must end with the `EventHandler` suffix and an event will be generated. It can be used to connect to and disconnect from the signal. Speaking of signals, connecting them is easier than ever now that you can use C# lambdas, without having to spread your code around your files.
Finally, Godot 4 moves away from reflection, relying instead on source generators to improve performance, moving a lot of the work that we used to do at runtime to compile time. This also allows us to find and report errors when building the project instead of failing when running the game. We hope the new analyzers will help users write better code and avoid common pitfalls such as unsupported types in exported properties.
Currently, the .NET version of Godot still requires a separate build of Godot but a unified editor is planned for future releases.
**Important remarks:**
- The C# version of the Godot editor requires the .NET 6.0 SDK to be installed on your computer to work.
- Godot 4 doesn't support C# projects imported from Godot 3. It may be possible to edit your project files manually, but otherwise it's recommended to let Godot generate a new one.
- Currently, mobile and web platforms are not available. Support for them will likely come in Godot 4.1.
<a id="gdextension-experimental"></a>
### GDExtension - experimental
Sometimes user-level scripting is not enough. Being an open-source project, Godot has always valued extensibility.
With the existing GDNative API layer, you could already extend the engine without forking or recompiling it. But it was our first attempt at making a nice abstraction layer for engine internals that you could plug and play into. And so for all its benefits, GDNative didn't feel quite there yet.
With Godot 4, we introduce a new system called **GDExtension**. By design, it takes the best parts of creating GDNative extensions and writing custom engine modules using high performance languages such as C, C++ or Rust.
The code that you make with GDExtension can be ported into the engine if need be, and vice versa: Some engine parts can be made into a GDExtension library, reducing engine bloat. It also offers tighter integration into the editor now as you can expose your extension code as nodes and the engine will automatically generate help pages.
GDExtension was implemented by Juan and George, and further improved by many contributors, especially while porting the [official godot-cpp C++ bindings](https://github.com/godotengine/godot-cpp). Resident XR enthusiast and Godot contributor Bastiaan Olij ([BastiaanOlij](https://github.com/BastiaanOlij)) took the time to write a blog post to [introduce GDExtensions](/article/introducing-gd-extensions).
**Important remarks:**
- This feature is still experimental so it's reasonable to expect breaking changes as the API gets polished.
- Godot 3 GDNative libraries are not automatically compatible.
- Documentation is still a work in progress.
<a id="physics"></a>
## Physics
| |
140414
|
## Internationalization
<a id="extended-language-support"></a>
### Extended Language Support
Localization is probably the most straightforward way to allow more people to experience your game or use your tool efficiently. As a tool itself, Godot 4.0 is the first version to benefit from the newly added support for bidirectional text and font ligatures. This means you can not only create games for a worldwide audience, but developers who use right-to-left languages themselves (Arabic, Urdu, Farsi, etc.) can now use Godot in the language they are most comfortable with.
<video autoplay loop muted playsinline>
<source src="/storage/blog/godot-4-0-sets-sail/08-ui-text-in-different-languages.mp4" type="video/mp4">
</video>
<a id="easier-translation-workflow"></a>
### Easier Translation Workflow
The second challenge of distributing your project to a wider audience is, of course, translation. Godot 4's editor can now generate Portable Object Template (or POT) translation files directly from your project's scenes and scripts. This makes it easy for translators to work with your content and produce complete translations. If your workflow uses other file formats, you can also add your own parser.
Godot 4's translation system is now context-aware. It allows you to have multiple translations of the same string depending on the context. It also supports plurals allowing for correct translation depending on the quantity.
Your localization efforts are further assisted by a built-in pseudolocalization tool. Implemented by Angad Kambli ([angad-k](https://github.com/angad-k)), a Google Summer of Code 2021 student, it allows you to easily test the effects of diacritics and other font permutations on your UI without having to rely on actual translations to stress test your project. You can learn more about pseudolocalization features in the student's report [here](/article/gsoc-2021-progress-report-1#pseudolocalization).
<a id="editor-ux"></a>
## Editor & UX
Many of the exciting new features you can now leverage in your projects have also been applied to the editor itself to improve your experience. The new text rendering system and bidirectional text support is not the only example.
Further improving accessibility to a wider pool of users, the editor now features enhanced touch support for Android devices.
Another example of a practical feature you can already use in your own projects, and that is being added to the editor itself, is multi-window support. You can already move docks like the Inspector to other monitors, and more parts of the interface should support popping out as separate windows in upcoming Godot 4 releases.

<a id="easier-importing"></a>
### Easier Importing
Importing is finally much easier. Resolving a major past pain point associated with importing 2D and 3D assets, Godot 4 now comes with a [dedicated import dialog](https://github.com/godotengine/godot/pull/47166). It allows you to preview and customize every part of the imported scene, its materials and physical properties. Scripts can still be used for additional tweaks, thanks to the [new plugin interface](https://github.com/godotengine/godot/pull/53813).

You should also notice a [significant bump](https://github.com/godotengine/godot/pull/47370) in textures import speed thanks to the etcpak library, and the new multi-threaded importer. Additionally, you can now [import your glTF files](https://github.com/godotengine/godot/pull/52541) at runtime, allowing for more modular 3D projects and tools made with the engine.
Give it up to K. S. Ernest Lee ([fire](https://github.com/fire)), who brought in his expertise as an import and usability specialist.
<a id="new-editor-features-widgets"></a>
### New Editor Features & Widgets
You will notice a myriad of new editor features and widgets created to simplify your workflow and give you better control.
The new command palette, added by a student during Godot Summer of Code this year, provides quick access to a lot of editor operations for keyboard-proficient users. Read a report by Bhuvaneshwar ([Bhu1-V](https://github.com/Bhu1-V)) [here](/article/gsoc-2021-progress-report-1#command-palette) to learn more about this feature.
The “default_env.tres” file, which added a fallback environment to all projects, has been replaced by an in-editor default DirectionalLight3D and WorldEnvironment. This makes it easy to tweak lighting and effects and preview assets in the editor, without the hassle of having to remember to manually disable your in-editor nodes at runtime. For more information, see the [blog post](/article/editor-improvements-godot-40).
The new color pickers with different picker shapes and color modes allow you to quickly select or update your project's color palette.

The new history dock shows your undo and redo history and lets you jump to any step very quickly. The undo history now works per scene, so pressing Ctrl+Z will stick to the active scene.
<a id="inspector-dock-improvements"></a>
### Inspector Dock Improvements
The inspector dock has received its share of attention too. You can finally export your custom resource types from your scripts and directly reference nodes in the inspector, saving you time during development. Similarly, you can use annotations to draw sections and organize properties. You will also find it easier to edit arrays, dictionaries, and complex resources in the inspector, complete with pagination.
<a id="scene-dock-improvements"></a>
### Scene Dock Improvements
The scene dock offers new ways to search and filter nodes quickly, which is a big time saver for large scenes. Another big time saver has got to be the new and improved script templates, which can now be [customized per node type](https://github.com/godotengine/godot/pull/53957). The editor even comes with some handy physics body templates, courtesy of Fabrice.
<a id="script-editor-improvements"></a>
### Script Editor Improvements
The script editor has also leveled up. It now features greatly improved syntax highlighting, font ligatures, and multiple cursor support. You'll notice new icons in the margin indicating when you override a function and linking you to the parent implementation or to the documentation.
<video autoplay loop muted playsinline>
<source src="/storage/blog/godot-4-0-sets-sail/10-editor-multiple-cursors.mp4" type="video/mp4">
</video>
You can now edit various text-based data files in the script editor, such as JSON, YAML, and more.
One of the features already backported to Godot 3.5 is the ability to mark a node as unique in your scene. You can now apply this to multiple nodes simultaneously and quickly access these nodes in your scripts without writing their full paths. Nodes marked as unique are cached so the performance when accessing them is great.
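For instance (the node name is hypothetical), a node marked as unique can be accessed with the `%` prefix from anywhere in its scene:
```gdscript
@onready var health_bar: ProgressBar = %HealthBar
```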
Also, everything supports drag and drop to the script editor. You can control-click and drag multiple nodes to create on-ready variables, or just click and drag nodes or files into the script editor to get their path.
<a id="easier-version-control"></a>
### Easier Version Control
You will encounter fewer merge conflicts when using a version control system because resources are now assigned unique identifiers instead of relying on file paths.
The editor will also store the version last used to edit a project inside the project.godot file. This way you will be able to quickly check what version of Godot a project was created with. Additionally, the project manager will show a warning if you try to edit a project made with a different version of Godot, or a project made using unavailable engine features.
<a id="new-movie-maker-mode"></a>
### New Movie Maker Mode
Showcasing your progress is of course an integral part of your experience with a game engine. For that, Godot 4 introduces the new movie maker mode. It allows you to render scenes frame by frame at the maximum quality settings to record videos or trailer footage using the engine. Godot can render frames to a compressed AVI video or as a sequence of PNG images for lossless rendering.
<a id="new-editor-theme"></a>
### New Editor Theme
Finally, because design deeply matters when you're staring at your screen for hours, Hugo has also created a [new editor theme](https://github.com/godotengine/godot/pull/45607) with a more modern feel and improved color schemes for better accessibility. It can be tailored to your preferences through multiple theme customization options.
<a id="navigation"></a>
## Navigation
<a
| |
140437
|
### Editor
<a id="node-copypaste"></a>
#### Node copy-pasting
Being able to easily cut/copy and paste nodes sounds like a basic feature to have, but it is only now that it [could finally be implemented in a reliable way](https://github.com/godotengine/godot/pull/34892), thanks to the hard work of Tomasz Chabora ([KoBeWi](https://github.com/KoBeWi)). Previously, to copy nodes within the scene, they had to be duplicated and dragged under the desired parent. Moving nodes between scenes was only possible by using the clunky "Merge from scene" feature. Being able to copy nodes as easily as you can copy text was probably one of the most-wanted features since the first release of Godot!
There were multiple attempts at implementing it, but it took time to refine them into something reliable that could be merged, especially due to the need to take into account the full complexity of the scene tree (instanced scenes, editable children, shared or unique resources and subresources, etc.). In Godot 3.3, the dream has come true: nodes can be cut, copied and pasted, both within the same scene and between scenes. Manipulating the scene tree has never been this convenient.

*Hello again, [`logo.png` stickman!](https://twitter.com/01lifeleft/status/959761839897767936)*
<a id="subresource-editing"></a>
#### Improved Inspector sub-resource editing
[After much discussion](https://github.com/godotengine/godot-proposals/issues/2230), Juan Linietsky ([reduz](https://github.com/reduz)) implemented a change to the Inspector to [better highlight sub-resources visually](https://github.com/godotengine/godot/pull/45907), so that it's easier to know which resource you're editing when there are more than two levels.
This was implemented for Godot 4.0 but was fairly easy to backport, so here you go! The colors and contrast can be customized in the editor settings.

<a id="import-presets"></a>
#### Import presets configuration
When you add assets to a Godot project, most of them get imported to engine-internal formats on the fly, based on options configured in the Import dock. There are some pre-existing presets for all asset types, and you can define which preset should be used for all resources of the same type (e.g. the "2D Pixel" preset for textures), but until now there was no easy way to configure all presets in a unified interface.
For Godot 4.0, Juan implemented a new tab in the Project Settings dialog to [configure those "Import Defaults"](https://github.com/godotengine/godot/pull/46354), and this was backported to Godot 3.3.

Additionally, a new ["Keep" import mode](https://github.com/godotengine/godot/pull/47268) was added to configure specific files to be left as-is (i.e. not imported) by Godot's import system. This is particularly useful for files which you intend to process yourself from scripts based on their raw contents (e.g. using the `File` API, loading them as text or bytes), such as CSV files used as a database (as opposed to Godot's default import of CSV files as translation catalogs).

<a id="3d-editor"></a>
#### 3D editor improvements
Contributors such as Aaron Franke ([aaronfranke](https://github.com/aaronfranke)), Joan and Hugo did a significant amount of work improving the usability of the 3D editor for 4.0, and most of it was backported to the 3.3 branch.
This includes changes such as:
- [Dynamic infinite 3D grid](https://github.com/godotengine/godot/pull/43206) ([further improved here](https://github.com/godotengine/godot/pull/45594)).
- [A much-improved 3D rotation gizmo](https://github.com/godotengine/godot/pull/43016), with [increased opacity for better visibility](https://github.com/godotengine/godot/pull/44384).
- [A better 3D selection gizmo](https://github.com/godotengine/godot/pull/40106).

*To infinity and beyond!*
<a id="detect-scene-changes"></a>
#### Detect external scene changes
One of the biggest hurdles when working with Godot projects in a team was that it was very easy to overwrite changes made by another person if they modified a currently opened scene. How often did you pull changes from Git, only to see them discarded because Godot didn't detect that the scene had changed? While with scenes you just had to reload them, modifications to `project.godot` by another team member required you to restart the Godot editor to properly apply the changes. This was especially problematic during game jams, where multiple people work on the same small project simultaneously.
Thanks to Tomasz again, with Godot 3.3, any external changes to opened scenes or `project.godot`, be it <abbr title="Version Control System">VCS</abbr> pull or external text editor modification, are [properly detected by the Godot editor](https://github.com/godotengine/godot/pull/31747) and you get an option to either reload the affected files, discard the changes or do nothing (which in most cases means another prompt when the editor is re-focused).

Do note that due to how built-in resources (resources saved within the scene instead of separate files) work, some of them might sometimes not get reloaded correctly (this especially applies to built-in scripts). It's a known infrequent issue, already fixed in Godot 4.0.
<a id="scripting"></a>
### Scripting
<a id="gdscript"></a>
#### GDScript
No big change for GDScript in this release as all the focus has been on the rewrite and optimization of GDScript for Godot 4.0.
Still, there's been a [number of bugfixes](https://github.com/godotengine/godot/pulls?q=is%3Apr+sort%3Aupdated-desc+milestone%3A3.3+label%3Atopic%3Agdscript+is%3Amerged) which should make the experience more stable.
As for eye candy, Yuri Roubinsky ([Chaosus](https://github.com/Chaosus)) implemented a feature to [preview `Color` constants in the auto-completion drop-down](https://github.com/godotengine/godot/pull/43026):

<a id="mono"></a>
#### Mono/C#
C# users will benefit from a [redesign of the solution build output panel](https://github.com/godotengine/godot/pull/42547) made by Ignacio Roldán Etcheverry ([neikeq](https://github.com/neikeq/)):

There have been further fixes to the solution and build system, allowing users to [target .NETFramework with the Godot.NET.Sdk and .NET 5](https://github.com/godotengine/godot/pull/44135).
Moreover a 3.2.2 regression was fixed for [`System.Collections.Generic.List` marshalling](https://github.com/godotengine/godot/pull/45029), and [Unicode identifiers are now properly supported](https://github.com/godotengine/godot/pull/45310).
There's also been [extensive](https://github.com/godotengine/godot/pull/44373) [work](https://github.com/godotengine/godot/pull/44374) on Mono compatibility with WebAssembly.
<a id="other-areas"></a>
### Other areas
| |
140466
|
---
title: "Core refactoring progress report #2"
excerpt: "As promised in my previous post, the core refactoring work I am undertaking took two months to complete. This means rewriting large parts of the core engine for consistency and features."
categories: ["progress-report"]
author: Juan Linietsky
image: /storage/app/uploads/public/5e7/f74/307/5e7f74307ce34896455561.jpeg
date: 2020-03-28 00:00:00
---
As promised in my previous post, the core refactoring work I am undertaking took two months to complete. This means rewriting large parts of the core engine for consistency and features.
### Core refactoring
Core refactoring is mostly work on the most low-level, critical and shared parts of the engine. This work is done only for major versions because it implies breaking compatibility and introducing instability and bugs (because of all the newly added code), which is actually the case right now. The major refactoring undertaken for Godot 3.x, two years ago, changed a large part of the internals, but we were unable to do everything we wanted to do.
The migration to Vulkan already implied breaking compatibility and, together with all the user feedback we got since 3.x was released, we have a very good idea of what needed to change in this time.
So following is the list of what changed during March:
#### OS / DisplayServer split
One of the largest singletons in Godot is the [OS](https://docs.godotengine.org/en/3.1/classes/class_os.html) class. It allows access to low-level OS functions as well as window management.
This was cumbersome for many reasons and also imposed limitations, such as:
* Having a large, bloated OS class on every platform.
* Inability to support different display APIs in a single binary. This is especially visible on desktop Unixes, where you can use X11, Wayland or even EGL directly on a Raspberry Pi.
* Lack of proper support for multiple windows. Multi-window support not only allows the editor to make some docks float so you can move them to another monitor (a much-requested feature), it is also useful for games developed for certain types of dedicated hardware, or for tools created with Godot as the base platform (which is something several users do).
* Impossibility to run the engine headless (with a dummy display driver) on all platforms. The "server" platform had to be used as a workaround (and will now be deprecated).
* Difficult to abstract window management functions to users, which had to access the OS class directly (and which is not as user friendly as working with nodes).
The new implementation moves all low-level window management to a new singleton, **DisplayServer**, which handles everything regarding display. To keep the implementation simple and portable, Godot will always assume you have one _main window_ and, if the platform supports it, allow you to create _sub windows_.
#### Window node
The new **DisplayServer** allows managing multiple windows, but using it directly is still too low level and unfriendly for most users. To compensate, a new **Window** node has been introduced. This node inherits **Viewport** and gives it the ability to appear as a floating window. Working with it is easy: just place your nodes as children of the Window! You can create UIs (with controls) or display the 3D world in it (by adding a **Camera** node). It's basically the same as a Viewport, so using this new node is extremely easy and straightforward.

Additionally, the root node in Godot (`get_tree().get_root()`), which used to be of type **Viewport**, is now of type **Window**. If you want to manage the game window, simply access this node directly.
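As a rough sketch of how this fits together (shown in C# for illustration, using names from Godot 4's .NET API; the window title, size and label text are made up):

```csharp
using Godot;

public partial class Main : Node
{
    public override void _Ready()
    {
        // The root of the scene tree is now a Window, not a bare Viewport.
        Window root = GetTree().Root;
        root.Title = "My Game";

        // Sub-windows are just nodes: parent nodes to a Window and it
        // shows up as a floating window (where the platform supports it).
        var popup = new Window();
        popup.Size = new Vector2I(320, 240);
        popup.AddChild(new Label { Text = "Hello from a sub-window!" });
        AddChild(popup);
    }
}
```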
All input events, which were previously sent to the *MainLoop* class, are now sent directly from **DisplayServer** to the *Window* (so *MainLoop*, and hence *SceneTree*, have been stripped of this functionality).
#### Multiple Window Support in Editor
Likewise, the Godot editor now supports making the docks float. For now, only docks and existing windows are supported, but we will extend this to other parts of the editor.
It is important to note that, by default, **docks will remain *docked*** and nothing will change. Some users expressed concerns that we would now force them to always use floating windows. This **is not the case**: you can separate windows from the main one **only if you want to** (if, for example, you have a second monitor and want to make use of the extra screen space), but by default **nothing will change**.
#### Embedded mode
But, what if you are working on a full-screen game and need to use windows? Or what about platforms which don't support floating windows such as iOS, Android, HTML5 or even consoles?
One of the new features of this system is that the **Viewport** class can now be instructed to embed all child **Window** nodes and provide internal windows for them, emulating a window manager within it, including decorations, resizing, title bar, close button, etc. This can be done manually by toggling the "embed subwindows" property.
At the same time, the new **DisplayServer** can be queried for supported features, one of which is *subwindow* support; the new root **Window** checks whether this is supported on each platform and toggles the property automatically. This is completely transparent to the user, so games (or the editor) don't need to be changed to run on platforms that don't support subwindows.
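If you ever want to perform this check yourself, here is a minimal sketch (in C#, using names from Godot 4's .NET API; the class name is made up for illustration):

```csharp
using Godot;

public partial class WindowSetup : Node
{
    public override void _Ready()
    {
        // Ask the DisplayServer whether native sub-windows are available;
        // if not, embed them in the main viewport instead.
        bool native = DisplayServer.HasFeature(DisplayServer.Feature.Subwindows);
        GetTree().Root.GuiEmbedSubwindows = !native;
    }
}
```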

If, for debugging purposes, you want to run the editor (or your game) using embedded subwindows instead of native windows, use the `--single-window` command line flag.
### Node renames
The Godot scene system is known for its ease of use and its ability to represent your mental map as a data and file structure. That said, many node names were unclear or confusing, which is mainly evident when comparing their 2D and 3D counterparts.
Contrary to what many believe, Godot started as a 3D engine, but soon migrated to being a 2D one. This is why the 3D nodes don't have a suffix (like Area) while the 2D ones do (Area2D). This was very misleading for users, as it's not always obvious whether you are using the 2D or the 3D version.
To solve this, Godot 4.0 will rename all 3D nodes and give them proper suffixes: nodes like "Area", "RigidBody" or "Light" will become "Area3D", "RigidBody3D" and "Light3D" respectively.
Additionally, due to popular demand, the "Spatial" node will be renamed to "Node3D", to further enhance consistency with the 2D engine.

A compatibility system has been added so older scenes will convert the node types to the new ones on load.
### Server renames
Most servers in Godot are very old, and their naming conventions were by now obsolete. Because of this, most are being renamed:
* **VisualServer** (a name that became even more ambiguous thanks to the introduction of DisplayServer) has been renamed to **RenderingServer**.
* **NavigationServer** and **PhysicsServer** have been renamed to **NavigationServer3D** and **PhysicsServer3D** respectively.
* Likewise, to add more consistency, **Physics2DServer** and **Navigation2DServer** are now **PhysicsServer2D** and **NavigationServer2D**.
### Future
My work on core refactoring is mostly done, so next month (April) I will go back to working on Vulkan as promised. Hope to have new and exciting stuff to show by the end of next month!
And as always, please remember that our work on Godot is done out of love for you and the game development community. We want to provide you with a top-notch free and open source game engine, so you can own your work down to the last line of engine code. If you are not one yet, please consider becoming [our patron](https://www.patreon.com/godotengine) and help us realize this dream sooner.
---
title: "Current state of C# platform support in Godot 4.2"
excerpt: "How the transition to a unified .NET has impacted platform support, and re-adding the ability to port to mobile."
categories: ["progress-report"]
author: Raul Santos
image: /storage/blog/covers/progress-update-csharp-2.webp
date: 2024-01-26 17:00:00
---
With the recent release of Godot [4.2](/article/godot-4-2-arrives-in-style), projects that use C# can now export to Android and iOS. Let's take a look at the current platform support for C# projects and what to expect from future releases beyond 4.2.
## Background
First a bit of history. Godot 3 supports exporting C# projects to all the platforms supported by the engine. The C# implementation uses the [Mono embedding APIs](https://www.mono-project.com/docs/advanced/embedding/) and the [Mono runtime](https://www.mono-project.com/docs/advanced/runtime/). **Mono** is an open source cross-platform implementation of the Windows-only .NET Framework.
With the [4.0 release](https://godotengine.org/article/godot-4-0-sets-sail/), the C# integration moved away from the Mono embedding APIs, replacing them with the .NET Core hosting APIs (using the `hostfxr` library). This allowed us to modernize the codebase and prepare for [.NET 5](https://devblogs.microsoft.com/dotnet/introducing-net-5/), the first release in the .NET unification journey. Unfortunately, in this move C# projects lost the ability to export to platforms other than desktop (Windows, macOS and Linux).
Before **.NET unification** there was the Windows-only .NET Framework and the cross-platform Mono and .NET Core. Unification means there is just one .NET going forward, so the release that followed .NET Core 3.1 was named .NET 5, and Mono and .NET Framework won't receive any new major releases.
The term **Mono** can be used to refer to a collection of technologies. With .NET unification, the Mono framework is deprecated, but the runtime is still supported. Unified .NET uses both the CoreCLR and the Mono runtimes, but Mono is the only runtime that supports mobile and web platforms.
However, in .NET 7.0 a new runtime became available, [**NativeAOT**](https://learn.microsoft.com/en-us/dotnet/core/deploying/native-aot), which allows publishing native binaries built for a specific platform, instead of the JIT-based portable binaries. In .NET 7.0, NativeAOT only supports Windows and Linux, but .NET 8.0 adds support for macOS and experimental support for Android and iOS. This means that in .NET 8.0, NativeAOT can be used as an alternative to Mono for mobile platforms.
---
title: "Web Export in 4.3"
excerpt: "With single-threaded builds and sample playback, it's now easier than ever to export your game to the Web with Godot 4.3. And more!"
categories: ["progress-report"]
author: Adam Scott
image: /storage/blog/covers/progress-report-web-export-in-4-3.webp
image_caption_title: "Catburglar"
image_caption_description: "An open source game by @JohnGabrielUK and his team"
date: 2024-05-15 13:15:00
---
Have you ever begun some type of work, only to realize afterwards how little you actually knew? That happened to me during [the last <abbr title="Game Developers Conference">GDC</abbr>](https://godotengine.org/article/gdc-2024-retrospective/).
A few months ago, I took over [Fabio Alessandrelli's (@faless)](https://github.com/Faless/) responsibilities as [Web Platform Lead](https://godotengine.org/teams/#platforms) for Godot to reduce his task load and accelerate the pace at which our Web platform exports continue to improve.
I'm well used to the Web and its quirks. I began creating websites in <abbr title="Extensible Hypertext Markup Language">XHTML</abbr> and (vanilla) JavaScript, back in the day. Flash games were my jam! Enough so that I even worked for a video game studio as an ActionScript 3 developer in 2010. And I haven't really stopped caring about the Web platform since.
Since then, Flash may have died, but online games didn't.
# The new situation
But what I realized at the <abbr title="Game Developers Conference">GDC</abbr> is that we're entering a sort of Golden Age of Web games. Not only are websites like [Poki](https://poki.com/) or [Crazy Games](https://www.crazygames.com/) super popular, but big players are starting to integrate Web games into their services, such as [Discord Activities](https://support.discord.com/hc/en-us/articles/4422142836759-Activities-on-Discord) or [YouTube Playables](https://www.youtube.com/playables). All these entities want developers to create games for their platforms, and they are all asking how Godot can bring a first-class development experience to them.
# Godot 4.3
Making games for the Web using Godot 4.x still isn't as seamless as we would like it to be. Unfortunately, serious revisions are needed to improve the experience to the extent that we want.
Godot 4.3 promises to be one of the best recent releases for Web exports: one of the biggest issues has finally been properly fixed with the return of single-threaded exports.
## Single-threaded Web export
### Betting on the wrong horse
[`SharedArrayBuffer`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SharedArrayBuffer)s were supposed to revolutionize the Web. And they did. That API makes it possible to share memory between [Web Workers](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) (which are the Web's native "threads").
Unfortunately, we live in the same timeline that includes the [Spectre and Meltdown exploits](https://meltdownattack.com/).
The result is that [browsers greatly reduced where and how you can use that API](https://developer.chrome.com/blog/enabling-shared-array-buffer?hl=en). Namely, browsers nowadays require websites to be [cross-origin isolated](https://web.dev/coop-coep/). Isolation does unlock the potential of `SharedArrayBuffer`s, but at what cost?
At the cost of the ability to make remote calls to other websites. _Adieu_ game monetization. _Adieu_ interacting with an external API.
The issue is that during the development of Godot 4, we bet on the wrong horse: we went all-in on threads backed by `SharedArrayBuffer`s. We thought it was mainly a question of [browser support](https://caniuse.com/sharedarraybuffer) (which took a step back due to the aforementioned security issues), so when all browsers finally shipped stable releases with `SharedArrayBuffer`, we streamlined everything around multi-threading.
But we underestimated the difficulty of configuring cross-origin isolated websites, which requires serving pages with specific `Cross-Origin-Opener-Policy` and `Cross-Origin-Embedder-Policy` headers.
We know it has been a hurdle for a lot of users trying to publish their jam games on [itch.io](https://itch.io), which has an experimental `SharedArrayBuffer` option, but one that relies on another feature ([`coep:credentialless`](https://caniuse.com/mdn-http_headers_cross-origin-embedder-policy_credentialless)) not supported by Safari or Firefox for Android.
Likewise, most Web games are being published on third-party publishing platforms where the game authors may not even be able to set the required headers for `SharedArrayBuffer` support.
### Backporting the single-threaded feature from Godot 3.x
At the end of the development of Godot 4.2, [the Foundation](https://godot.foundation/) tasked me with finding a way to backport the ability to build Godot without using threads, in order to export Godot games without the pesky <abbr title="Cross-Origin-Opener-Policy">COOP</abbr>/<abbr title="Cross-Origin-Embedder-Policy">COEP</abbr> headers.
This was not easy, because 4.0 was made with threads in mind, which may explain why we were so hesitant to go ahead with this change. Fortunately, with the help of our thread guru [Pedro J. Estébanez (@RandomShaper)](https://github.com/RandomShaper), I was able to tame the beast and bring back single-threaded builds.
[My <abbr title="Pull Request">PR</abbr>](https://github.com/godotengine/godot/pull/85939) was finally merged at the beginning of the Godot 4.3 development period.

It even had some unexpected benefits. Apple devices (macOS and iOS) were long known to have issues when playing Godot Web exports. Well, when you export your game single-threaded, these issues fortunately disappear.
However, it came with a downside too: it introduced a new, virtually unsolvable bug.
The single-threaded build garbles the audio of games, making them unplayable on low-end machines such as laptops or phones, and even on high-end machines running at high frame rates.
And when I say unplayable, I mean it. [Try it yourself.](https://adamscott.github.io/2d-platformer-demo-main-thread/){:target="_blank"} (Consider yourself lucky if it doesn't glitch out.)
_Please note: we know that there are some issues running Platformer 2D on Safari, namely the browser reloading the page and complaining that "this webpage is using significant memory". We're investigating so we can fix this <abbr title="as soon as possible">ASAP</abbr>._
Yes. Web single-threaded audio stream playback with Godot 4.2 is that bad. It is marginally better on Godot 3.x, but the problem exists there as well.
## Sample playback to the rescue!
[Here comes my <abbr title="Pull Request">PR</abbr>](https://github.com/godotengine/godot/pull/91382) (not yet merged at the time of writing) to the rescue. [Here's the same demo without sound issues.](https://adamscott.github.io/2d-platformer-demo-main-thread-samples/){:target="_blank"}
### What are samples?
Using samples for games is not a novel idea. In fact, for a long time, it was the _de facto_ way to play sound and music on game consoles. As they didn't have the processing power to do the mixing on the CPU, you could send audio clips (hence the term "sample") to a special chip and tell it how and when to play them.
Since the audio chip operated on its own, the sound would never lag or glitch out, even when the game was lagging.
### (Re)introducing samples to Godot
When open sourced, the Godot engine offered two ways to emit audio: streams for music and samples for sounds.

But when Godot 3 was released, it was decided that sample support was not needed anymore. Game consoles nowadays ask developers to mix samples themselves on the CPU, and for the Web, we could mix the audio using a dedicated thread, like on other platforms, thanks to `SharedArrayBuffer`s.
This is why audio garbles in single-threaded builds (without my "fix"): audio rendering is tied to the frame rate, and if it drops, there are not enough audio frames to fill the buffer, hence the glitches.
What [my <abbr title="Pull Request">PR</abbr>](https://github.com/godotengine/godot/pull/91382) does is reinstate sample playback, at the time of writing for the Web platform only.
### Near seamle
---
title: "Dev snapshot: Godot 3.0 alpha 2"
excerpt: "One step closer to the release of Godot 3.0! With this alpha 2 development snapshot, Godot users will be able to preview the upcoming C# support and continue testing the advanced 3D features introduced in Godot 3.0. This snapshot is of course expected to be buggy and unstable, so please be aware that it does not reflect the final state of what Godot 3.0 will be like."
categories: ["pre-release"]
author: Rémi Verschelde
image: /storage/app/uploads/public/59f/8c3/c91/59f8c3c91a87f065753289.png
date: 2017-10-31 18:23:17
---
A little treat (or is it a trick?) for our community on this Halloween eve: Godot 3.0 *alpha 2* is out, ready for your testing! It's already been 3 months since our previous [official development snapshot](/article/dev-snapshot-godot-3-0-alpha-1), and lots of bugs have been fixed, making us one big step closer to the final 3.0 *stable* release.
It's also the first build to include the long awaited support for the C# programming language using [Mono](http://mono-project.com/)! This is of course still pretty rough, though usable, and we are looking forward to your feedback and bug reports. Some caveats are documented below as well as in the [introduction blog post](/article/introducing-csharp-godot), so make sure to read them before filing issues.
## Disclaimer
**IMPORTANT: This is an *[alpha](https://en.wikipedia.org/wiki/Software_release_life_cycle#Alpha)* build, which means that it is *not suitable* for use in production, nor for press reviews of what Godot 3.0 would be on its release.**
There is still a long road of bug fixing and usability improvement before we can release the stable version, and this release comes with incomplete documentation: the in-editor and [online Class Reference](http://docs.godotengine.org/en/latest/) is quite complete thanks to the awesome work of our documentation team, but there aren't many tutorials about using Godot 3.0 yet. For the Mono build, there is no specific documentation yet. This release is exclusively for testers who are already familiar with Godot and can report the issues they experience on GitHub.
There is also no guarantee that projects started with the alpha 2 build will still work in later builds, as we reserve the right to make necessary breaking adjustments up to the *beta* stage.
**Note:** New Godot users should *not* use this build to start their learning. Godot 2.1 is still supported and [well documented](http://docs.godotengine.org/en/stable/).
## The features
Since the previous alpha build, there have been hundreds of bugs fixed, as well as many usability enhancements to make the new features as easy to use as possible.
There was also a [strong focus on the documentation](https://godotengine.org/article/first-godot-3-docs-sprint-sept-9), with the Class Reference close to 70% complete now (which is already much higher than the completion level of the 2.x API documentation).
### Quick Mono howto
Of course, the main feature many have been waiting for is the Mono support. It comes in separate binaries with an additional system requirement: the Mono SDK. You need to install the [current stable version](http://www.mono-project.com/download/) of Mono to use with Godot, as you will be developing applications which require the .NET Framework.
If you installed Mono in the classical system directories (using the upstream macOS or Windows installers, or the Linux repositories), everything should work out of the box.
If you installed Mono in a specific directory, things might get a bit more complex. You can override the `MONO_PATH` environment variable to point to the location of your .NET Framework 4.5, typically `/path_to_mono_root/lib/mono/4.5/`. You will also need `msbuild` in your `PATH`, so if you installed it in a location which is not included in the `PATH`, you can either override the latter or create a symbolic link.
## Downloads
The download links are not featured on the [Download](/download) page for now to avoid confusing new users. Instead, browse one of our mirrors and download the editor binary for your platform and the export templates archive:
- Classical version: [[HTTPS mirror](https://downloads.tuxfamily.org/godotengine/3.0/alpha2)] [[HTTP mirror](http://op.godotengine.org:81/downloads/3.0/alpha2)]
- Mono version (requires the Mono SDK): [[HTTPS mirror](https://downloads.tuxfamily.org/godotengine/3.0/alpha2/mono)] [[HTTP mirror](http://op.godotengine.org:81/downloads/3.0/alpha2/mono)]
**Note:** Export templates are currently missing due to a last minute regression in the HTML5 platform (**Edit 2017-10-31 23:00 UTC:** They are now available for the classical version).
Export templates for the Mono flavour will not be provided, as exporting Mono games is not completely implemented yet.
Also clone the [godot-demo-projects](https://github.com/godotengine/godot-demo-projects/) repository to have demos to play with. Some of them might still need adjustments due to recent changes in the *master* branch; feel free to report any issue.
## Bug reports
There are still many open bug reports for the 3.0 milestone, which means that we are aware of many bugs already. We still release this snapshot to get some early feedback while we work on fixing the known issues.
As a tester, you are encouraged to open bug reports if you experience issues with alpha 2. Please check first the [existing issues](https://github.com/godotengine/godot/issues), using the search function with relevant keywords, to ensure that the bug you experience is not known already.
Have fun with this alpha 2 and stay tuned for future, more stable releases :)
---
## Downloads
{% include articles/download_card.html version="4.1.4" release="rc2" article=page %}
**Standard build** includes support for GDScript and GDExtension.
**.NET 6 build** (marked as `mono`) includes support for C#, as well as GDScript and GDExtension.
- .NET build requires [.NET SDK 6.0](https://dotnet.microsoft.com/en-us/download/dotnet/6.0) or [7.0](https://dotnet.microsoft.com/en-us/download/dotnet/7.0) installed in a standard location.
{% include articles/download_card.html version="4.2.2" release="rc2" article=page %}
**Standard build** includes support for GDScript and GDExtension.
**.NET build** (marked as `mono`) includes support for C#, as well as GDScript and GDExtension.
- .NET build requires [.NET SDK 6.0](https://dotnet.microsoft.com/en-us/download/dotnet/6.0), [7.0](https://dotnet.microsoft.com/en-us/download/dotnet/7.0), or [8.0](https://dotnet.microsoft.com/en-us/download/dotnet/8.0) installed in a standard location.
- To export to Android, .NET 7.0 or later is required. To export to iOS, .NET 8.0 is required. Make sure to set the target framework accordingly in the `.csproj` file (e.g. `<TargetFramework>net8.0</TargetFramework>`).
{% include articles/prerelease_notice.html %}
## Known issues
There are currently no known issues introduced by these releases.
As with every release, there are still various pre-existing issues which have been reported but not fixed yet. See the GitHub issue tracker for a complete list of [known bugs](https://github.com/godotengine/godot/issues?q=is%3Aissue+is%3Aopen+label%3Abug+).
## Bug reports
As a tester, we encourage you to [open bug reports](https://github.com/godotengine/godot/issues) if you experience issues with this release. Please check the [existing issues on GitHub](https://github.com/godotengine/godot/issues) first, using the search function with relevant keywords, to ensure that the bug you experience is not already known.
In particular, any change that would cause a regression in your projects is very important to report (e.g. if something that worked fine in previous 4.x releases no longer works).
## Support
Godot is a non-profit, open source game engine developed by hundreds of contributors in their free time, as well as a handful of part-time or full-time developers hired thanks to [generous donations from the Godot community](https://fund.godotengine.org/). A big thank you to everyone who has contributed [their time](https://github.com/godotengine/godot/blob/master/AUTHORS.md) or [their financial support](https://github.com/godotengine/godot/blob/master/DONORS.md) to the project!
If you'd like to support the project financially and help us secure our future hires, you can do so using the [Godot Development Fund](https://fund.godotengine.org/) platform managed by [Godot Foundation](https://godot.foundation/). There are also several [alternative ways to donate](/donate) which you may find more suitable.
---
title: "Godot 3.1 will get many improvements to KinematicBody"
excerpt: "One of the features that make Godot stand out is how easy it is to use the physics engine for non-physics games. For Godot 3.1, several improvements are being worked on."
categories: ["progress-report"]
author: Juan Linietsky
image: /storage/app/uploads/public/5b4/de9/702/5b4de97024c77681425121.gif
date: 2018-07-17 00:00:00
---
## KinematicBody
One of the features that make Godot stand out is how easy it is to use the physics engine for non-physics games.
KinematicBody makes it possible to move a character around with a single function (`move_and_slide`). Simply pass a linear velocity, and it will be returned adjusted as the character moves around the level.
```
velocity = move_and_slide(velocity)
```
This function tries to move the character using that velocity and every time a collision is found, it will slide against it (and adjust the velocity accordingly). Some extra features are also present, such as specifying the floor direction:
```
velocity = move_and_slide(velocity, Vector2(0, -1)) # floor points up
```
After the call, you can detect whether the character is on the floor with a call to:
```
is_on_floor()
```
Likewise for walls. This allows adjusting the player animation accordingly. Godot also detects if the floor below is moving, and will move the character along with it.
Unfortunately, despite the ease of use, this approach has a few limitations that may or may not be evident depending on the type of game you are working on.
## Snapping
The most obvious problem with this approach is snapping to the floor. As the character slides around the level, some situations may lead to the player flying around:

In some games, this effect may be desired (looks kind of cool), but in other types of games this is unacceptable.
To easily solve this, Godot 3.1 will introduce a new function: ***`move_and_slide_with_snap`***
This function takes a third parameter: a snap vector. It means that, while the character is standing on the floor, it will try to remain snapped to it. If it is no longer on the ground, it will not try to snap again until it touches down.
The snap argument is a simple vector specifying a direction and a length (how far it should search for ground to snap to):
```
# snap 32 pixels down
velocity = move_and_slide_with_snap(velocity, Vector2(0, -1), Vector2(0, 32))
```
This works very well in practice:

Of course, you must make sure to disable snapping when the character jumps. Jumping is usually done by setting `velocity.y` to a negative value (like -100), but if snapping is active this will not work, as the character will snap right back to the floor.
Disabling snap can be done by passing a zero-length snap vector (`Vector2()`) to `move_and_slide_with_snap()`, or by calling `move_and_slide()` instead.
A common trick for this is to have a `jumping` boolean variable and toggle it on when the character jumps:
```
# jump logic example with snapping
if is_on_floor() and Input.is_action_just_pressed("jump"):
    # can jump only when on floor
    velocity.y = -100
    jumping = true

# disable jumping when character is falling again
if jumping and velocity.y > 0:
    jumping = false

# preset for snap, 32 pixels down
var snap = Vector2(0, 32)

# oh, but are we jumping? no snap then
if jumping:
    snap = Vector2()

velocity = move_and_slide_with_snap(velocity, Vector2(0, -1), snap)
```
## RayCast Shapes
Another common situation (when using `move_and_slide()`) is that the character takes more effort going up slopes than going down them.

This is physically correct, but some types of games expect the velocity to remain constant when moving up and down slopes.
The solution is the same as when creating a dynamic (physics-based) character controller: using RayCast shapes, which now work with KinematicBody.
RayCast shapes separate the body in the direction they point, and will always return a collision normal along their ray instead of the one provided by the floor (because, again, their goal is to separate). Yes, the name is confusing; maybe they should be renamed to SeparationRayShape in Godot 4.0 :)
In any case, change the collision shape in your body to include a ray pointing down:

And now, when moving around the stage, the speed will remain constant:

Keep in mind that the maximum slope angle where this works is geometrically determined by the length of the ray, so make sure to adjust the ray length to the steepest slope you want to support:

As you can see in the picture above, the right slope is so steep that it hits the capsule before the ray, which effectively disables the ray and makes the character climb with a lot more effort. This happens naturally, without a single line of code needing to be written.
## Sync to Physics
When a KinematicBody is moved around (be it using `move_and_slide`, or just by modifying the `position`, `rotation`, etc. properties), it automatically computes its linear and angular velocity. This means that if a physics-enabled box (RigidBody) or a KinematicBody (moving via `move_and_slide`) is standing on it, it will be moved along with it.
This makes KinematicBody also very useful for moving platforms, elevators, scripted doors, etc.
The main limitation until now was that, when used as a moving platform, its motion was always a frame ahead of the physics, so a character standing on it appeared to slowly slide over it:

To compensate for this, a new option, "*Sync to Physics*", was added. This option is useful when animating the body via AnimationPlayer or when tweaking the position/rotation properties manually:

When enabled, objects will appear in sync with the KinematicBody:

## Future
After these functions are well tested, they will be ported to the 3D engine, where they should work in the exact same way.
Once again, please remember that all of this is done with love for everyone. Our aim is to make a 100% free and open source game engine where the games you make with it are truly yours.
This progress is made possible thanks to the infinite generosity of our patrons. If you are not one yet, please consider [becoming our Patron](https://www.patreon.com/godotengine). This way, you don't need to be an expert engine programmer to aid with Godot development :)
---
title: "Godot 3.0 switches to Bullet for physics"
excerpt: "When Godot started (a decade ago), there were not many good physics engine available and Godot always had quite demanding API requirements for them (such as Area nodes, KinematicBody, RayCast shapes, etc.), so they were not usable without a lot of modification. This led us to implementing our own custom engine. Now, thanks to the work of Andrea Catania, we are introducing Bullet as a new and better maintained backend for the 3D physics!"
categories: ["progress-report"]
author: Juan Linietsky
image: /storage/app/uploads/public/59f/f1b/66d/59ff1b66d1fbe773318157.png
date: 2017-11-05 00:00:00
---
***Update:** Due to several circumstances (such as a shift of focus from upstream Bullet developers away from game development), Godot 4.0 is going back to its in-house physics engine for 3D (GodotPhysics). Bullet is no longer available in Godot 4.0, but it (or other physics engines like [Jolt](https://github.com/godot-jolt/godot-jolt)) may be implemented as an add-on using [GDExtension](https://godotengine.org/article/introducing-gd-extensions).*
___
## Godot's physics engine
Back at the first versions of Godot (a decade ago), not many physics engines existed or were available. Even if a few were, Godot always had quite demanding API requirements for them (such as Area nodes, KinematicBody, RayCast shapes, etc.), so they were not usable without a lot of modification. This led me to do some research and write my own.
Over time this became quite a hassle, because maintaining a physics engine and keeping it up to date with the new techniques and algorithms is time consuming.
## Introducing Bullet
Godot always supported an abstract physics interface, so Andrea Catania (Odino) volunteered to add Bullet support as a backend. I initially thought it would not be possible to replicate Godot's API faithfully in Bullet, but Andrea proved me wrong and did a fantastic job. He also finished before the beta deadline, so his work was merged and will be present in Godot 3.0.
Physics should work just like before, and no code should change, except Bullet is being used internally.
Godot's old physics engine is still provided for compatibility and can be selected in the project settings, but it will likely be removed by the time 3.1 is out.
## New possibilities
With Bullet as the physics backend, new possibilities emerge, such as soft bodies, cloth support and GPU (OpenCL) physics.
These will be added after 3.0 is out, likely for 3.1.
## Will it work for 2D physics?
No. Godot's 2D and 3D physics engines are separate. Our 2D physics engine also has considerably more customization code, such as one-way collisions for both kinematic and rigid bodies. The 2D physics engine will likely remain custom and be improved after 3.0 is out.
---
## The new Variant type
Previous versions of C# in Godot used `System.Object` to represent Godot's Variant type, which is a special container for the various data types supported by the engine. The problem with that is that `System.Object` is the base type of all .NET types, including types that are not supported by Godot. This means the Godot API allowed using types that would fail at runtime, since we wouldn't be able to convert them to a type that Godot understands.
Now we have a dedicated `Variant` struct that represents all the types that Godot supports. This means our API is more strict about which types you can use, and you will get errors at compile time before executing your game.
The way the `Variant` struct is implemented also avoids boxing value types (such as `int` and `float`) in cases where it was unavoidable in Godot 3.
When using `System.Object`, users were able to take advantage of .NET features such as pattern matching and generics. The new `Variant` struct no longer allows using those features directly. Pattern matching can be replaced with checking the `Variant.VariantType` property, while generics can be supported using the `[MustBeVariant]` attribute.
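As a rough sketch of what that looks like in practice (the method and values are made up for illustration):

```csharp
using Godot;

public partial class VariantExample : Node
{
    // Instead of pattern matching on System.Object, inspect the
    // VariantType and convert explicitly.
    private static void Describe(Variant value)
    {
        switch (value.VariantType)
        {
            case Variant.Type.Int:
                GD.Print($"Integer: {value.AsInt64()}");
                break;
            case Variant.Type.String:
                GD.Print($"String: {value.AsString()}");
                break;
            default:
                GD.Print($"Some other type: {value.VariantType}");
                break;
        }
    }

    public override void _Ready()
    {
        Describe(42);      // implicit conversion to Variant
        Describe("hello");
    }
}
```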
For more information about how Variants are supported in C#, take a look at the new documentation page about [_C# Variant_](https://docs.godotengine.org/en/latest/tutorials/scripting/c_sharp/c_sharp_variant.html).
#### Relevant links
- C#: Represent Variant as its own type instead of System.Object ([GH-3837](https://github.com/godotengine/godot-proposals/issues/3837)).
- Merge .NET 6 branch with master ([GH-64089](https://github.com/godotengine/godot/pull/64089)).
- C#: Optimize Variant conversion callbacks ([GH-68310](https://github.com/godotengine/godot/pull/68310)).
## Collections changes
Just like in 3.x, `Array` and `Dictionary` collections are supported in C# through the types defined in the `Godot.Collections` namespace. Godot's packed arrays, a special type of arrays optimized for memory usage, don't have their own representation in C#, and normal C# arrays should be used (e.g. `byte[]`).
Godot APIs are only able to understand these Godot collections, which means anything that interops with Godot's native side needs to use one of the supported collection types.
In 3.x, the non-generic collections used `System.Object`, just like .NET non-generic collections. However, thanks to the new [`Variant`](#the-new-variant-type) type, Godot C# collections now implement the generic collection interfaces where `T` is `Variant`. This means that the non-generic collections now only support Variant-compatible types, so you'll get compile errors when using an invalid type rather than a runtime exception.
The generic collections will also validate that the generic `T` parameter is one of the Variant-compatible types, since it's annotated with the new `[MustBeVariant]` attribute.
Godot collections now implement more utility methods that expose functionality similar to what is available in other supported languages. Using these instance methods is generally faster than LINQ, because it avoids marshaling every item in the collection. On top of that, collections also provide an extra utility method, `MakeReadOnly`, which allows you to freeze a collection so it can no longer be modified.
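A minimal sketch of how this fits together (the class and item values are made up for illustration):

```csharp
using Godot;

public partial class Inventory : Node
{
    public override void _Ready()
    {
        // A generic Godot array; T must be a Variant-compatible type,
        // which is enforced at compile time via [MustBeVariant].
        var items = new Godot.Collections.Array<string> { "sword", "shield" };
        items.Add("potion");

        // Freeze the collection: further modifications are rejected.
        items.MakeReadOnly();
        GD.Print(items.IsReadOnly); // True
    }
}
```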
You are not limited to using Godot collections in your own internal logic, of course. The Base Class Library in .NET includes many other collections that can be used. These collections are not going to work with Godot's API but can still be useful. In 3.x, some .NET collections were supported in very specific scenarios (such as exporting properties). This is no longer supported in 4.0 since it required reflection. Support for .NET collections may be re-introduced in the future, implemented with source generators instead of reflection.
To help you choose which collection type to use and when, we have a new documentation page all about [_C# Collections_](https://docs.godotengine.org/en/latest/tutorials/scripting/c_sharp/c_sharp_collections.html).
#### Relevant links
- Merge .NET 6 branch with master ([GH-64089](https://github.com/godotengine/godot/pull/64089)).
- C# Export on List not possible anymore ([GH-70298](https://github.com/godotengine/godot/issues/70298)).
- Sync C# Array with Core ([GH-71786](https://github.com/godotengine/godot/pull/71786)).
- Sync C# Dictionary with Core ([GH-71984](https://github.com/godotengine/godot/pull/71984)).
## Signals as events
Godot signals are a great way to implement the Observer pattern. It's possible to use Godot signals by using the common engine API, such as the `Connect` and `EmitSignal` methods. But C# developers are more used to using events, as they are more idiomatic.
For Godot 4.0, we now generate C# events for all Godot signals, which allows developers to use the event syntax that they are used to and love. As a consequence, subscribing to Godot signals using the event syntax is type-safe.
These generated events differ from normal C# events in two ways:
* These events are automatically disconnected when the Godot object that contains the signal is freed.
* Using the `Invoke` method to raise the event is not allowed.
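For instance, a minimal sketch of subscribing to a built-in signal through its generated event (the node path and handler name are made up for illustration):

```csharp
using Godot;

public partial class Countdown : Node
{
    public override void _Ready()
    {
        // The "timeout" signal is exposed as the generated Timeout event.
        var timer = GetNode<Timer>("Timer");
        timer.Timeout += OnTimerTimeout;
    }

    private void OnTimerTimeout()
    {
        GD.Print("Time's up!");
    }
}
```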
We are looking into ways to allow raising events in a type-safe way, so keep an eye out for that in the future.
For more information on how to use signals in C#, take a look at the new documentation page about [_C# Signals_](https://docs.godotengine.org/en/latest/tutorials/scripting/c_sharp/c_sharp_signals.html).
#### Relevant links
- Fix C# bindings after recent breaking changes ([GH-37050](https://github.com/godotengine/godot/pull/37050)).
- Create a dedicated "C# Signals" page ([GH-6643](https://github.com/godotengine/godot-docs/pull/6643)).
## Callable support
In Godot 4 we introduce the new `Callable` type, which represents a method of an object instance or a standalone function. This may sound familiar, because C# already supports a similar concept with delegate types, the `Action` and `Func` types, and lambdas.
In order to support interoperability with the engine, Godot's C# API implements a `Callable` type that can be created from an `Action` or a `Func`. This also allows developers to use C# lambdas with Godot API.
`Callable`s can be invoked using the `Call` or `CallDeferred` methods. We currently don't support binding values to parameters in C#. This shouldn't be a problem when using C# lambdas, since they can capture the values they need in closures.
```csharp
string name = "John Doe";

// Create a Callable from a lambda; the closure captures `name`.
Callable callable = Callable.From(() => SayHello(name));
callable.Call(); // Invoke it through Godot's API.

void SayHello(string name)
{
    GD.Print($"Hello, {name}");
}
```
#### Relevant links
- Fix C# bindings after recent breaking changes ([GH-37050](https://github.com/godotengine/godot/pull/37050)).
- C#: Reflection-less delegate callables and nested generic Godot collections ([GH-67987](https://github.com/godotengine/godot/pull/67987)).
## Int and Float changes
In the Godot API, the names `INT` and `FLOAT` mean 64-bit integer and floating-point types, respectively. This is true even in Godot 3, but in C# we used to marshal those types as the 32-bit C# types `int` and `float`. This led to [marshaling issues](https://github.com/godotengine/godot/issues/39609) and potential precision loss. This has been fixed in Godot 4.0: we now use the same bit width as the engine, which means some APIs have changed from `int` to `long` and from `float` to `double`.
For vector types, Godot uses 32-bit floats by default (this can be changed by building the engine with `precision=double`). This may lead to some inconvenient situations where values must be converted to `float` before using them with vectors. This limitation also applies to some engine core APIs, the most common one being the `delta` parameter in `_Process` and `_PhysicsProcess`. The `Mathf` API has been updated with both `float` and `double` overloads, which should reduce friction with these changes.
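For example, a minimal sketch of the typical cast at that boundary (the node type and speed value are made up for illustration):

```csharp
using Godot;

public partial class Mover : Node3D
{
    private const float Speed = 4.0f;

    public override void _Process(double delta)
    {
        // `delta` is now a 64-bit double, while vector math uses
        // 32-bit floats by default, so cast once at the boundary.
        Translate(Vector3.Forward * (Speed * (float)delta));
    }
}
```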
#### Relevant links
- C#: _Process / _PhysicsProcess receive delta as float instead of double ([GH-65139](https://github.com/godotengine/godot/issues/65139)).
- C#: Assume 64-bit types when type has no meta ([GH-65168](https://github.com/godotengine/godot/pull/65168)).
- Reduce the amount of casting required for floating points in C# ([GH-5403](https://github.com/godotengine/godot-proposals/issues/5403)).
- C#: Add float and double overloads to Mathf ([GH-71583](https://github.com/godotengine/godot/pull/71583)).
## Renames
A lot of types and members have been renamed in Core with the intention to make names clearer. In C# we took the opportunity to also rename a bunch of APIs in order to more closely follow the [.NET naming guidelines](https://learn.microsoft.com/en-us/dotnet/standard/design-guidelines/naming-guidelines).
The [_Capitalization Conventions_](https://learn.microsoft.com/en-us/dotnet/standard/design-guidelines/capitalization-conventions) guidelines indicate that C# should use PascalCase for all identifiers except parameter names (including acronyms over two letters in length). As a result, some types have been renamed (e.g. `CPUParticles2D` is now `CpuParticles2D`, `DTLSServer` is now `DtlsServer`).
The `Godot.Object` type is now named `GodotObject` in order to avoid conflicting with the `System.Object` type. This follows the [_Namespaces and Type Name Conflicts_](https://learn.microsoft.com/en-us/dotnet/standard/design-guidelines/names-of-namespaces#namespaces-and-type-name-conflicts) guidelines:
- Types in two different namespaces shouldn't have the same name if those namespaces are often used together.
- Type names should not conflict with the names of types in the .NET Core namespaces (such as `System`).
The [_General Naming_](https://learn.microsoft.com/en-us/dotnet/standard/design-guidelines/general-naming-conventions#using-abbreviations-and-acronyms) guidelines also recommend avoiding abbreviations or contractions as part of identifier names. Acronyms should only be used when they are widely accepted, and even then, only when necessary.
This should make Godot's API more consistent with the rest of the .NET ecosystem, and hopefully avoid some conflicts.
#### Relevant links
- PascalCase naming inconsistencies with C# ([GH-28748](https://github.com/godotengine/godot/issues/28748)).
- C#: Renames to follow .NET naming conventions ([GH-69547](https://github.com/godotengine/godot/pull/69547)).
## Exported property improvements
A lot of highly requested features have been implemented in Godot to improve exported properties and the inspector. These features are also supported in C#.
In 4.0, you are now able to export properties of type `Node` or any derived type directly, without the need to export a `NodePath` and retrieve it manually.
It's also now possible to group properties with the new `[ExportCategory]`, `[ExportGroup]` and `[ExportSubgroup]` attributes. Take a look at the updated [_C# Exports_](https://docs.godotengine.org/en/latest/tutorials/scripting/c_sharp/c_sharp_exports.html) documentation page to learn more about the new attributes.
Support for flag enums (annotated with the `[System.Flags]` attribute) has also been improved. Exported properties for such enums now show up as checkboxes in the Inspector.
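Here is a minimal sketch pulling these together (the class, group and enum names are made up for illustration):

```csharp
using System;
using Godot;

public partial class Character : Node2D
{
    [Flags]
    public enum Abilities
    {
        DoubleJump = 1,
        Dash = 2,
        WallClimb = 4,
    }

    // Node-derived types can now be exported directly,
    // without going through a NodePath.
    [Export]
    public Node2D Target { get; set; }

    [ExportGroup("Movement")]
    [Export]
    public float Speed { get; set; } = 200.0f;

    // [Flags] enums show up as checkboxes in the Inspector.
    [Export]
    public Abilities Unlocked { get; set; }
}
```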
Due to [engine interop improvements](#engine-interop-with-source-generators), we now use source generators instead of reflection. This means attributes are not retrieved at runtime; instead, the source generators emit different code based on the attributes found during source analysis. This makes it much more difficult to support custom user attributes derived from the Godot attributes: with our current source generator implementation, derived attributes have no effect on user scripts.
To prevent possible confusion, we've sealed the attributes to disallow inheriting from them. We are looking into ways of extending our source generators so that custom user attributes can affect scripts, so keep an eye out for that in the future.
In Godot 4.0, a bunch of new export annotations were added to GDScript. However, we didn't have time to add equivalent attributes in C#. All those annotations can be replicated with the `[Export]` attribute using the right combination of `PropertyHint` and `HintString` values, until we implement these convenient attributes in a future version of Godot 4.
The much anticipated support for exporting [custom resources](#global-classes) hasn't made it in time, unfortunately, and is still in development, but you can expect it to arrive very soon.
#### Relevant links
- Ability to export Node types instead of just NodePaths ([GH-1048](https://github.com/godotengine/godot-proposals/issues/1048)).
- Add ability to export Node pointers as NodePaths ([GH-62185](https://github.com/godotengine/godot/pull/62185)).
- C#: Enable exporting nodes to the inspector ([GH-62789](https://github.com/godotengine/godot/pull/62789)).
- Add grouping annotations for class properties in GDScript ([GH-62707](https://github.com/godotengine/godot/pull/62707)).
- C#: Add an easy and accessible way of organizing export variables ([GH-3451](https://github.com/godotengine/godot-proposals/issues/3451)).
- C#: Add grouping attributes for properties ([GH-64742](https://github.com/godotengine/godot/pull/64742)).
- C#: Preserve order of exported fields/categories ([GH-64852](https://github.com/godotengine/godot/pull/64852)).
- Support explicit values in flag properties, add C# flags support ([GH-59327](https://github.com/godotengine/godot/pull/59327)).
## Improved C# documentation
A lot of Godot tutorials and tips you can find online contain samples of GDScript code. To help you translate them into C# equivalents, the official documentation has a [C# differences article](https://docs.godotengine.org/en/latest/tutorials/scripting/c_sharp/c_sharp_differences.html), which has also been updated. It now features a complete set of tables with the C# equivalent for every common GDScript API. Sometimes a Godot C# equivalent does not exist because the .NET Base Class Library offers a more suitable method.
The new equivalence tables try to encompass every possible case, and should be a great tool for developers looking to port GDScript code to C#. Let us know if this can be improved!
#### Relevant links
- C#: Add table with equivalent string methods ([GH-6442](https://github.com/godotengine/godot-docs/pull/6442)).
- C#: Add table with equivalent GlobalScope methods ([GH-6721](https://github.com/godotengine/godot-docs/pull/6721)).
- C#: Add table with equivalent Array methods ([GH-6677](https://github.com/godotengine/godot-docs/pull/6677)).
- C#: Add table with equivalent Dictionary methods ([GH-6676](https://github.com/godotengine/godot-docs/pull/6676)).
- Add Color section to C# differences page ([GH-6679](https://github.com/godotengine/godot-docs/pull/6679)).
## NuG
---
ure
Some features didn't make it in time for Godot 4.0, but are already on our roadmap for a future release (and some are already in the works).
### Global classes
Global classes, also known as named scripts, are classes registered in the editor so they can be used more conveniently. These classes appear in the _Add Node_ and _Create Resource_ dialogs. GDScript supports this feature with the `class_name` syntax.
We tried, but ultimately failed, to bring this feature to C# in time for 4.0. Luckily, there's already a PR open, so it's very likely to be ready for the next minor release, Godot 4.1.
#### Relevant links
- Add first-class custom resource support ([GH-18](https://github.com/godotengine/godot-proposals/issues/18)).
- Allow exporting custom resources from/to GDScript, VisualScript, C#, and PluginScript ([GH-48201](https://github.com/godotengine/godot/pull/48201)).
- Script-class-aware Inspector & related controls ([GH-62413](https://github.com/godotengine/godot/pull/62413)).
- Enable QuickOpen to see scripted resources ([GH-62417](https://github.com/godotengine/godot/pull/62417)).
- Add C# resource export ([GH-72619](https://github.com/godotengine/godot/pull/72619)).
### Mobile and web support
We aim to support C# on all the platforms that Godot is available on. Unfortunately, we were unable to implement support for the mobile and web platforms in time for 4.0. These platforms only gained upstream support fairly recently, so we didn't have much time to work on them. The new workloads in .NET 6 should allow us to support the mobile and web platforms really soon, so keep an eye out for that.
#### Relevant links
- [Announcing .NET 6 — The Fastest .NET Yet](https://devblogs.microsoft.com/dotnet/announcing-net-6/).
### Full AOT and trimming support
Support for these features can greatly reduce binary size and enable further performance optimizations. In order to support them, we need to ensure that our libraries are prepared. Dynamically accessed types can't be trimmed, so we need to avoid reflection as much as possible.
Some target platforms don't support JIT, so we are definitely looking into supporting AOT. We'll be working on this as we add support for more platforms in future 4.x releases.
#### Relevant links
- [App Trimming in .NET 5](https://devblogs.microsoft.com/dotnet/app-trimming-in-net-5/).
- [Native AOT deployment](https://learn.microsoft.com/en-us/dotnet/core/deploying/native-aot/).
### GDExtension support
The ability to create GDExtensions using C# will enable new and powerful workflows. This would allow us to avoid the limitations of a scripting language implementation, such as [relying on file paths](https://github.com/godotengine/godot/issues/15661) to reference C# classes. Users are going to be able to implement C# types that can be registered in Godot's internal type database, ClassDB, and that would behave as built-in Nodes and Resources.
GDExtension is still very new to the engine, and is subject to change. It also has some limitations that would result in UX regressions, if C# moved away from the scripting language implementation. But it also has the potential to reduce some pain points that users have had in the past. In Godot 4.0, C# is still implemented as a scripting language, but we are planning to add support for GDExtension creation using C#. We will be keeping the scripting language implementation for your regular game scripting needs.
Consuming APIs provided by a GDExtension is also currently unsupported. We'll keep working on bridging the gap between GDExtension and C# in future 4.x releases.
#### Relevant links
- [Introducing GDNative's successor, GDExtension](https://godotengine.org/article/introducing-gd-extensions/).
### Improve source generators extensibility
As mentioned throughout this article, several areas can benefit from allowing users to create their own third-party source generators that can extend what's currently available. For example, allowing our source generators to be affected by custom user-defined attributes.
This is tricky to get right, but we'll be exploring some ideas in future 4.x releases to see how we can provide some extensibility without requiring users to disable and re-implement all of our source generators.
### Editor unification
Just like Godot 3 before it, Godot 4.0 will come with two versions of the editor: one "standard", and one with extra support for C# and .NET. However, the plan for 4.x is to provide one unified editor that supports everything the .NET build currently supports. If you're a GDScript user, this doesn't come at any cost for you, but for maintainers and C# developers it greatly simplifies things and allows for more efficient editor development and testing. To keep things slim and flexible, the components required to support .NET projects would be downloaded on demand, when the user needs them.
#### Relevant links
- From embedding Mono to Godot as a library and the future ([GH-2333](https://github.com/godotengine/godot-proposals/issues/2333)).
### Godot as a library
The concept of Godot as a library, with C# as the entry point, is something that a lot of users seem to be interested in. We think this could bring many benefits to C# users: it would make it easier to support all the platforms where .NET is available, and to stay up to date with newer .NET versions.
We started exploring this area in 4.0, but it still requires more investigation and work. It's unlikely to be finished any time soon but we'll keep working on it in the future.
#### Relevant links
- From embedding Mono to Godot as a library and the future ([GH-2333](https://github.com/godotengine/godot-proposals/issues/2333)).
---
As you can see, a lot of work has been done, and more is in the pipeline. We are constantly looking into ways to improve the performance and usability of the API, and we keep fixing the bugs that you find. We are looking forward to seeing what you think of the .NET module in Godot 4.0. Don't hesitate to reach out with your feedback! 🙂
---
title: "Multiplayer in Godot 4.0: RPC syntax, channels, ordering"
excerpt: "New RPC syntax and features in Godot 4.0. Introducing channels and ordered transfer mode."
categories: ["progress-report"]
author: Fabio Alessandrelli
image: /storage/app/uploads/public/615/766/117/6157661176579529840022.png
date: 2021-09-25 14:00:00
---
Howdy Godotters! Time for [another update](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-1) on Godot 4.0's multiplayer networking.
We have been really busy working on the foundation of the networking and multiplayer classes lately, and there are quite a few new features to talk about. In this post, we'll start by showing some of the new RPC syntax and features.
*See other articles in this Godot 4.0 networking series:*
1. [Multiplayer in Godot 4.0: On servers, RSETs and state updates](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-1)
2. (you are here) [Multiplayer in Godot 4.0: RPC syntax, channels, ordering](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-2)
3. [Multiplayer in Godot 4.0: ENet wrappers, WebRTC](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-3)
4. [Multiplayer in Godot 4.0: Scene Replication (part 1)](https://godotengine.org/article/multiplayer-changes-godot-4-0-report-4)
### Simplified RPC configuration
First of all, many users found the old `master` and `puppet` keywords in `3.x` confusing.
The `master` keyword meant that a function could be called on the "network master", while `puppet` meant that a function could only be called on the "non-master" peers. Additionally, the old `master` keyword saw very little use, since `remote` could be used in its place with little to no effort.
Learning from this, we decided to have a unified `@rpc` annotation with a few optional parameters.
#### Authority
```
@rpc
func my_rpc():
    print("RPC called.")
```
By default, `@rpc` only allows calls from the **multiplayer authority**, which is the server by default. You can optionally set the multiplayer authority on a per-node basis via the `Node.set_multiplayer_authority()` method.
In this sense, the `@rpc` annotation alone behaves like the old `puppet` keyword.
```
@rpc(any_peer)
func my_rpc():
    print("RPC called by: ", multiplayer.get_remote_sender_id())
```
As mentioned above, the `@rpc` annotation can also take optional parameters. If one of those parameters is `any_peer`, the RPC will be callable by any connected peer, behaving like the old `remote` keyword. You can get the ID of the peer that is making the call via the `MultiplayerAPI.get_remote_sender_id()` method.
```
@rpc(any_peer)
func my_rpc():
    var peer_id = multiplayer.get_remote_sender_id()
    if peer_id == get_multiplayer_authority():
        # The authority is not allowed to call this function.
        return
    print("RPC called by: ", peer_id)
```
There is no direct replacement for the rarely used `master` keyword. In those cases, `@rpc(any_peer)` can be used with an extra check on the sender ID, as done above.
#### Calling functions locally
```
@rpc(call_local)
func my_sync_rpc():
    print("RPC called")
```
In Godot, it's possible to instruct the engine that a specific function should also be called locally when sending RPCs.
In Godot `3.x`, this was done using even more dedicated keywords (e.g. `puppetsync`, `remotesync`, etc.). In Godot 4.0, `call_local` is instead an optional parameter of the `@rpc` annotation.
Parameters do not need to be in a particular order, so `@rpc(call_local, any_peer)` and `@rpc(any_peer, call_local)` are equivalent: both define an RPC that any peer can call and that, thanks to the `call_local` parameter, will also be executed locally on the sending peer.
### Simplified RPC calls
In Godot `3.x`, we used to have two different transfer modes for RPCs: reliable and unreliable.
Calling `rpc("my_func")` would transfer it reliably, while calling `rpc_unreliable("my_func")` would transfer it unreliably.
In most cases though, you would want the same transfer mode to be used for a given function (with few exceptions).
```
@rpc(unreliable)
func my_unreliable_rpc():
    print("RPC called.")
```
In Godot `4.0`, we decided to also make the transfer mode a parameter of the `@rpc` annotation.
You will still be able to override the configuration for a specific RPC using a dedicated `rpc_raw` function (not implemented yet).
### Channels and ordering
Two new features of the multiplayer API in Godot `4.0` are the introduction of channels, and the ordered transfer mode.
#### Channels
Most realtime network protocols, including ENet and WebRTC, support the concept of *channels*.
You can think of channels as separate streams inside the same connection, or even as separate connections to the same remote peer if you wish. Each channel acts independently of the others, and like rivers flowing at different speeds, reliable messages sent on different channels might arrive in a different order.
This might at first seem like a limitation, but it is actually their true power.
Every time you send a message (RPC) in a reliable way, the protocol needs to keep track of it and wait until the client acknowledges receiving it before sending more messages. While there are many techniques protocols use to optimize this process (e.g. buffering multiple messages), this inevitably introduces latency.
In your game, you will likely have some RPCs that are quite unrelated to the others (e.g. the player chat). These RPCs don't have to be perfectly in sync with the rest of the game (while retaining their internal order). In those cases, especially when transferring larger amounts of data, using a separate channel is an efficient way to reduce latency and lower the risk of disconnections.
```
@rpc(any_peer, 1)
func my_chat_func():
    print("RPC received on channel 1.")
```
Godot 4.0 will make these optimizations easier, allowing you to specify a channel other than the default by passing an integer as the last parameter of the `@rpc` annotation.
This also comes in handy with the other new feature, the "ordered" transfer mode.
#### Ordering
```
@rpc(unreliable_ordered)
func my_ordered_rpc():
    print("Ordered RPC received")
```
In general, unreliable RPCs are not guaranteed to arrive in order. If the server sends first the message `A` and then the message `B`, a client *could* receive `B` first, and then `A`.
An "ordered" RPC is an unreliable RPC that still guarantees that the received messages are in the right order. That is, if a client receives `B`, it will automatically discard any message that the server sent before it (including `A`, if it is received at a later time).
**A note of caution:** The ordered transfer mode is a powerful tool to further squeeze performance out of network connections, but it has the downside of potentially increasing packet loss if not used properly.
```
@rpc(unreliable_ordered, 1)
func _update_players_state(state):
    # Code to update the state of the players.
    pass

func _update_enemies_state(state):
    # Code to update the state of the enemies.
    pass
```
When using the ordered transfer mode, be advised that **you should not send heterogeneous messages** over the same channel.
In the code snippet above, we are designing a game where we don't mind if the player and enemy states are slightly out of sync on the client. However, we do want each state to only be updated if the received one is newer (hence the "ordered" mode).
In this case, we must use different channels for the two RPCs because we want two separate orderings. Otherwise, a "players" update could be dropped because a newer "enemies" state has already been received, which is not what we want.
### Future work
There are a lot of exciting new things to talk about, from heavy refactoring of the networking classes and exposing most low-level ENet functions, to the high level work done on the new scene replicator. Stay tuned for more :)
### Reference work
- [RPC refactor](https://github.com/godotengine/godot/pull/49221)
- [`@rpc` annotation PR](https://github.com/godotengine/godot/pull/49882)
- [Master/puppet removal, authority](https://github.com/godotengine/godot/pull/51481) (only recently merged as it required some consensus on the new namings).
| |
140665
|
### Physics and navigation {#physics}
Godot 4 marks a big return of Godot's in-house 3D physics engine, **Godot Physics**. For years, Godot has relied on the **Bullet** engine to provide a solid foundation for your 3D projects. We felt, however, that a bespoke solution would give us more flexibility when implementing new features and fixing issues.
But first, we needed to bring Godot Physics on-par with Bullet feature-wise, and improve performance and precision of these features along the way. This included adding new collision shapes, [cylinder](https://github.com/godotengine/godot/pull/45854) and [heightmap](https://github.com/godotengine/godot/pull/47347), as well as re-implementing [SoftBody nodes](https://github.com/godotengine/godot/pull/46937). In addition to feature-specific improvements, general optimization techniques, such as broadphase optimization and multithreading support, were implemented for both 2D and 3D environments. Some of these improvements can also be found in recent Godot 3 releases.

With that done, it was time to improve the user side of things. In Godot 4, setting up scenes is a breeze after a [major reorganization of physics nodes](https://github.com/godotengine/godot/pull/48908). A lot of properties previously unique to specific body types are now available to all **PhysicsBody** nodes. This allowed us to introduce the new **CharacterBody** node to replace old kinematic bodies and make the configuration of characters much simpler. Scripting them is simpler now as well: in previous versions of the engine, properties related to moving, sliding, and colliding had to be passed to each corresponding method manually. They can now be set up in scenes, on the nodes themselves, reducing the code needed to achieve the desired physical interactions.
But a new release is not just about big new features. A significant effort was put into fixing various issues causing jitter and imprecise computations. You can read more about all this work by contributors Camille Mohr-Daurat ([pouleyKetchoupp](https://github.com/pouleyKetchoupp)), [lawnjelly](https://github.com/lawnjelly), and Fabrice Cipolla ([fabriceci](https://github.com/fabriceci)) in [this blog post](https://godotengine.org/article/physics-progress-report-1) by Camille, who helms physics development in Godot.
To breathe more life into physical bodies, the next major version of Godot also introduces a new navigation system. Previous versions of the navigation system were entirely node-based, which limited its usability and performance. Thanks to work done by [Andrea Catania](https://github.com/AndreaCatania), Godot 4 features a server-based approach to navigation.
The new **NavigationServer** supports fully dynamic environments and on-the-fly navigation mesh baking. You can stream regions, which makes the system applicable to large open spaces. Physics bodies can be marked as obstacles for automatic collision avoidance, and it all works much faster than before thanks to multithreading support.
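As a rough sketch of what this server-based approach looks like in practice, a path query can be issued directly against the server from any `Node3D` (the wrapper function below is ours, using the current 4.0 API):
```gdscript
# Ask the NavigationServer for a path on the active 3D navigation map.
func get_nav_path(from: Vector3, to: Vector3) -> PackedVector3Array:
    var map: RID = get_world_3d().navigation_map
    return NavigationServer3D.map_get_path(map, from, to, true)  # true = optimize
```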
Andrea described the new system with a great practical example in a [dedicated article](https://godotengine.org/article/navigation-server-godot-4-0), and we recommend you give it a read.
### Scripting {#scripting}
[A recent study shows](https://godotengine.org/article/godot-community-poll-2021) that 100% of Godot users love to write a lot of code for their projects! With **GDScript** being the most used language, we wanted to really improve the coding experience in Godot 4 with some of the most requested and long-awaited language features. You can now reap the benefits of first-class functions and lambdas, new property syntax, the `await` and `super` keywords, and typed arrays. New built-in annotations make the language clearer and improve syntax for exported properties. And to top it off, your scripts can now automatically generate documentation that can be studied with the built-in help and the Inspector dock tooltips.

Despite growing in features, the GDScript runtime is only faster and more stable in Godot 4. This was achieved by a complete rewrite of the language backend by our main scripting maintainer George Marques ([vnen](https://github.com/vnen)). If you are interested in further reading, George has provided several detailed reports on the new language features ([1](https://godotengine.org/article/gdscript-progress-report-new-gdscript-now-merged), [2](https://godotengine.org/article/gdscript-progress-report-feature-complete-40)), as well as on the decision-making process for the new language parser and runtime ([1](https://godotengine.org/article/gdscript-progress-report-writing-tokenizer), [2](https://godotengine.org/article/gdscript-progress-report-writing-new-parser), [3](https://godotengine.org/article/gdscript-progress-report-type-checking-back), [4](https://godotengine.org/article/gdscript-progress-report-typed-instructions)). The documentation feature was implemented by a student, Thakee Nathees ([ThakeeNathees](https://github.com/ThakeeNathees)), during last year's Google Summer of Code. You can read their report [here](https://godotengine.org/article/gsoc-2020-progress-report-1#gdscript-doc).
Sometimes user-level scripting is not enough, though. Being an open source project, Godot has always valued extensibility. With the existing *GDNative* API layer, you don't even have to fork the engine to extend it. But it was our first attempt at making a nice abstraction layer for engine internals that you could plug-and-play into. And so for all its benefits, GDNative didn't feel quite there yet.
This is why with Godot 4, we introduce a new system called **GDExtension**. By design, it takes the best parts of creating GDNative extensions and writing custom engine modules. The code that you make can be ported into the engine if need be, and, vice versa, some engine parts can be made into a GDExtension library, reducing engine bloat. All this still without having to recompile the engine.
The new GDExtension system was implemented by Juan and George, and further improved by many contributors, especially while porting the [official godot-cpp C++ bindings](https://github.com/godotengine/godot-cpp). Resident XR enthusiast and Godot contributor Bastiaan Olij ([BastiaanOlij](https://github.com/BastiaanOlij)) took time to make a blog post to [introduce GDExtensions](https://godotengine.org/article/introducing-gd-extensions).
### GUI a
| |
140794
|
## Scripting {#scripting}
### GDScript {#gdscript}
With **GDScript** being the most used language among current Godot users, we wanted to really improve the coding experience in Godot 4 with some of the most requested and long-awaited language features. You can now reap the benefits of first-class functions and lambdas, new property syntax, the `await` and `super` keywords, and typed arrays. New built-in annotations make the language clearer and improve syntax for exported properties. And to top it off, your scripts can now automatically generate documentation that can be studied with the built-in help and the Inspector dock tooltips.
<video autoplay loop muted>
<source src="/storage/app/media/4.0/beta1/scripting-gdscript.mp4?1" type="video/mp4">
</video>
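To give an illustrative taste of several of these features together (this snippet is ours, not from the release notes):
```gdscript
@export var speed := 10.0  # annotation-based export syntax

var squares: Array[int] = [1, 4, 9]  # typed arrays

var greet := func(who): print("Hi, " + who)  # first-class lambdas

func _ready():
    await get_tree().create_timer(1.0).timeout  # await replaces yield
    greet.call("Godot")
```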
Despite growing in features, the GDScript runtime is only faster and more stable in Godot 4. This was achieved by a complete rewrite of the language backend by our main scripting maintainer George Marques ([vnen](https://github.com/vnen)). If you are interested in further reading, George has provided several detailed reports on the new language features ([1](https://godotengine.org/article/gdscript-progress-report-new-gdscript-now-merged), [2](https://godotengine.org/article/gdscript-progress-report-feature-complete-40)), as well as on the decision-making process for the new language parser and runtime ([1](https://godotengine.org/article/gdscript-progress-report-writing-tokenizer), [2](https://godotengine.org/article/gdscript-progress-report-writing-new-parser), [3](https://godotengine.org/article/gdscript-progress-report-type-checking-back), [4](https://godotengine.org/article/gdscript-progress-report-typed-instructions)). The documentation feature was implemented by a student, Thakee Nathees ([ThakeeNathees](https://github.com/ThakeeNathees)), during last year's Google Summer of Code. You can read their report [here](https://godotengine.org/article/gsoc-2020-progress-report-1#gdscript-doc).
### C# {#csharp}
There is, of course, great news for those waiting on C# support to return to the engine, as it was noticeably missing from alpha builds throughout 2022. The much anticipated port to .NET 6 has been mostly [completed](https://github.com/godotengine/godot/pull/64089)! It was added relatively recently and so has not been tested in the alphas as much as other features. Please be cautious in your testing and report any issues that you face.
With the move to **.NET 6**, users can now target a newer framework that brings optimizations and new APIs. Projects use C# 10 by default, with all of its features available.
Godot 4 moves away from reflection, instead relying on source generators to improve performance, moving a lot of the work that we used to do at runtime to compile time. This also allows us to find and report errors when building the project instead of failing when running the game, for example when using unsupported types in exported properties. We hope the new analyzers will help users avoid common pitfalls and write better code.
Of course, the 4.0 release is also a great opportunity to break compatibility to try and make the API better. Anything that changed in core APIs is also reflected in the .NET APIs. One of the most notable changes is the use of 64-bit types as scalar values: many APIs that used `int` or `float` now use `long` and `double`, with the most noticeable being the `_Process` method. A `Variant` type is also now implemented and used in every API that takes variants, where we were using `System.Object` in the past. This brings some improvements, such as avoiding boxing the values; you can read more about it in [this proposal](https://github.com/godotengine/godot-proposals/issues/3837).
Another change worth mentioning is the ability to declare signals as C# events. Declaring signals is done by writing a delegate with the `[Signal]` attribute like in the past, but now the delegate name must end with the `EventHandler` suffix and an event will be generated, which can be used to connect to and disconnect from the signal. Emitting a signal is currently done with the `EmitSignal` method but that may change in the future:
```cs
[Signal]
delegate void ValueChangedEventHandler(string newValue);
// The compiler generates the following event
public event ValueChangedEventHandler ValueChanged;
// Connect
ValueChanged += Foo;
// Disconnect
ValueChanged -= Foo;
// Emit
EmitSignal(SignalName.ValueChanged, "new value");
```
There's still more work to be done in the .NET module, some of which will likely still break compatibility even during the beta, so make sure to keep backups and clear the `.godot` directory on updates to ensure a clean build.
One of the big changes that we are still working on is support for writing GDExtensions in C#. With GDExtension, C# classes will be registered in the engine and work as the built-in classes do, which should improve the support of C# nodes and resources throughout the engine.
Currently, the .NET version of Godot still requires a separate build of Godot, just as in Godot 3. However, we are planning on unifying the editor so there won't be a standard and a .NET build anymore but a single editor that will download the necessary additional components when .NET is used.
Stay tuned for a deeper dive in the new features and upcoming changes in future progress reports!
**Important remarks:**
- The Godot editor requires the .NET 6.0 SDK to be installed in order to use C#.
- Godot 4 doesn't support C# projects imported from Godot 3. It may be possible to edit your project file manually, but otherwise it's recommended to let Godot generate a new one.
- Currently, mobile and web platforms are not available, with support likely coming in Godot 4.1.
### GDExtension {#gdextension}
Sometimes user-level scripting is not enough, though. Being an open source project, Godot has always valued extensibility. With the existing *GDNative* API layer, you don't even have to fork the engine to extend it. But it was our first attempt at making a nice abstraction layer for engine internals that you could plug-and-play into. And so for all its benefits, GDNative didn't feel quite there yet.
This is why with Godot 4, we introduce a new system called **GDExtension**. By design, it takes the best parts of creating GDNative extensions and writing custom engine modules. The code that you make can be ported into the engine if need be, and, vice versa, some engine parts can be made into a GDExtension library, reducing engine bloat. All this still without having to recompile the engine.
The new GDExtension system was implemented by Juan and George, and further improved by many contributors, especially while porting the [official godot-cpp C++ bindings](https://github.com/godotengine/godot-cpp). Resident XR enthusiast and Godot contributor Bastiaan Olij ([BastiaanOlij](https://github.com/BastiaanOlij)) took time to make a blog post to [introduce GDExtensions](https://godotengine.org/article/introducing-gd-extensions).
## Gui and Text {#gui
| |
140833
|
---
title: "Agile input processing is here for smoother, more responsive gameplay"
excerpt: ""
categories: ["progress-report"]
author: Pedro J. Estébanez
image: /storage/app/uploads/public/612/136/3cc/6121363ccb95e735957693.png
date: 2021-08-21 17:00:00
---
Since it's not very usual that I post here, let me remind you who I am. I'm Pedro, a.k.a. [RandomShaper](https://twitter.com/RandomPedroJ) in the Godot community. I've been contributing to the engine since 2016, when I discovered it –version 2.1 was the newest– and decided to use it to create my game [Hellrule](http://pedrocorp.net/#hellrule). It was precisely while testing this project on different models of Android phones that I found the need for the improvements I'm explaining in this post.
### Old behavior in Godot 3.3.x and before
In a game engine, the *engine loop* is the sequence of steps that runs again and again to let the game run. This includes rendering, physics, input processing, and more. Optimizing this loop to use as little CPU time as possible is important to have smooth gameplay on high-end and low-end hardware alike.
Godot's engine loop used to look like this (this is heavily simplified to just show what we are talking about here):

The key element of this is the possibility of multiple physics steps being processed per cycle of the engine loop. Consider a game that wants its gameplay logic and physics to run at 60 FPS. To achieve this, the engine needs to poll for the player's inputs from various sources, such as a mouse, keyboard, gamepad or touchscreen. Ideally, the engine would read the player's inputs once per each of those gameplay-physics steps so it reacts as quickly as possible to player actions. Also, rendering would happen at that very same pace, so everything stays in sync.
However, depending on the demands of the game and the hardware it's running on at a given time, that may not be possible. If the device running the game is not powerful enough to keep everything at 60 FPS, the engine will run at a lower effective FPS rate. Rendering and idle processing will then occur less than 60 times per second, but the engine will do its best to have the gameplay-physics running at the target rate, by executing more than one of those physics steps per visible frame.
If you look again at the game loop above, you'll understand that a consequence of the engine looping at a lower frequency is that user input is also pumped and handled less frequently, which leads to **lower responsiveness** in addition to a less smooth update of the display.
### New behavior in Godot 3.4 beta and later
In order to avoid that, Godot needed to somehow **uncouple input from rendering**, so the engine main loop looks more like this:

To make that happen, I've added the concept of **input buffering**: one thread –usually the one consuming events from the OS– stores the input events from the player in a buffer as they are received, while the main thread of the engine **flushes them at key points of the cycle**. This new approach improves the responsiveness of the game in situations of lower-than-ideal FPS.
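Conceptually, the mechanism boils down to a thread-safe buffer. The real implementation lives in the engine's C++ core; this GDScript-flavored sketch only illustrates the idea:
```gdscript
var _buffer := []          # events stored by the OS event thread
var _mutex := Mutex.new()  # protects the buffer across threads

func push_event(event) -> void:
    # Called from the thread that receives events from the OS.
    _mutex.lock()
    _buffer.push_back(event)
    _mutex.unlock()

func flush_events() -> Array:
    # Called by the main thread at key points of the cycle.
    _mutex.lock()
    var events = _buffer
    _buffer = []
    _mutex.unlock()
    return events  # the engine then dispatches these to input handling
```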
### Remarks
- This enhancement is implemented in two pieces. One belongs to the core of the engine, whereas the other must be implemented on each platform. At this point, only Android has the platform-specific part implemented. (To be honest, that's most likely the platform where agile input flushing is needed the most, due to the huge range of hardware capabilities found across devices.)
- However, the door is wide open for it to be implemented on other platforms, given the base work is already done.
- **Both Godot 3.4 and 4.0** will enjoy agile input, exposed as the `input_devices/buffering/agile_event_flushing` project setting.
- The project setting mentioned above is **disabled by default**. The rationale is that not every project may want batches of input events coming in multiple times per frame. Also, until this feature is implemented universally, enabling it causes differences in behavior across platforms. Nonetheless, elaborating on the latter, my game has been in the wild for months with agile input flushing on Android and without it on iOS, with no issues. As long as you make your game physics-centric, whether agile input is enabled or not won't have any side effects; the game will just be more responsive when it's enabled and available, but it will keep working without agile input.
| |
140860
|
---
title: "Godot gets CSG support"
excerpt: "After years of discussion on how to implement CSG, Godot finally gets support for it. This implementation is simple, but makes use of Godot's amazing architecture to shine."
categories: ["progress-report"]
author: Juan Linietsky
image: /storage/app/uploads/public/5ae/4d0/8c6/5ae4d08c674b5082905650.png
date: 2018-04-28 00:00:00
---
After years of discussion on how to implement CSG, Godot finally gets support for it. This implementation is simple, but makes use of Godot's advanced architecture to shine.
### Wait, what is CSG?
CSG stands for "Constructive Solid Geometry", and is a tool to combine basic (and not so basic) shapes to create more complex shapes. In 3D modelling software, CSG is mostly known as "Boolean Operators".

### Why is CSG relevant?
The aim of CSG in Godot is to be used for level prototyping. This technique allows you to create simple versions of most common shapes by combining primitives. Interior environments can be created by using inverted primitives instead.
CSG is a vital tool for level design (and for level designers in companies), as it allows testing gameplay environments without modelling a single triangle. Unreal has always offered similar boolean CSG, while Unity has recently acquired ProBuilder (which is a different type of tool, but still aimed at prototyping level geometry).
For developers, creating 3D art is a time-consuming process. For indies or small companies, it may even involve outsourcing to third-party artists. With CSG, a game can be developed almost entirely without relying on 3D environment artists, with that content filled in at the end, when gameplay is already working.

### How does it work?
Godot provides a bunch of Primitive nodes:
* **Sphere**
* **Box**
* **Cylinder** (can be used as Cone)
* **Torus** (donut shape)
* **Polygon** (can be drawn in 2D and then extruded)
* **Mesh** (can use any custom geometry)

Each of these operates on the parent CSG node, in order. Supported operations are:
* **Union**: Geometry of both primitives is merged, intersecting geometry is removed
* **Intersection**: Only intersecting geometry remains, the rest is removed
* **Subtraction**: The second shape is subtracted from the first, leaving a dent with its shape.

##### Process order
Every CSG node will first process its child nodes (and their operations: union, intersection, subtraction) in tree order, and apply them to itself one after the other.
There is a special CSGCombiner node that is pretty much an empty shape: it will only combine its child nodes. It's mostly used for organization.
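For illustration, here is how such a tree could be assembled from code, using the Godot 3.x class names (the node setup is an assumption of this sketch):
```gdscript
# A box with a spherical dent: the child is applied to its parent, here as a subtraction.
var body = CSGBox.new()
var dent = CSGSphere.new()
dent.operation = CSGShape.OPERATION_SUBTRACTION
body.add_child(dent)
add_child(body)
```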
##### Polygon and lofting
The CSGPolygon node is very convenient: a polygon is drawn in 2D (in X,Y coordinates), and it can be extruded in the following ways:
* **Depth**: Extruded back a given amount
* **Spin**: Extruded while spinning around its origin.
* **Path**: Extruded along a Path node. This operation is commonly called *lofting*.

##### Custom meshes
Any mesh can be used for CSG, which makes it easier to implement some types of custom shapes. Even multiple materials will be properly supported. There are some restrictions for geometry, though:
* It must be closed
* It must not self-intersect
* It must not contain internal faces
* Every edge must be shared by exactly two faces

Make sure CSG geometry remains relatively simple, as complex meshes can take a while to process.
##### A note on performance
If you are adding together objects that are logically separate (such as a table and the room containing it), it's better to keep them as separate CSG trees. Forcing too many objects into a single tree will eventually start affecting performance. Only use boolean operations where you actually need them.
### Godot CSG Implementation
As many libraries seemed to exist for this, I decided to pick one, put it in Godot, and implement the feature over a weekend. Of course, this unfortunately did not work well. A few things stopped me. Pretty much every library I found was:
* Under the GNU GPL or LGPL, making it incompatible with Godot.
* Using really crappy algorithms, such as voxels or BSP, making them inefficient.
* Designed for 3D modelling or mathematics, so they used algorithms heavily tuned towards avoiding precision errors.
Nothing was really meant for games. This led me to write a custom one for Godot, focused exclusively on performance.
The current implementation is really simple. It does brute-force clipping of triangles without relying on vertex IDs or isolating edges to detect interior faces. Instead, the implementation in Godot does triangle-triangle raytracing to detect which halves ended up inside the intersection.
This is expensive per se, but it's optimized by doing a preliminary AABB intersection test and using balanced binary trees to minimize the number of ray tests. As a result, the performance is really good.
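The pre-test idea itself is simple; as an illustrative sketch (the real code is C++), it amounts to a cheap rejection before the expensive part:
```gdscript
func should_clip(a_aabb: AABB, b_aabb: AABB) -> bool:
    if not a_aabb.intersects(b_aabb):
        return false  # bounding boxes don't overlap: skip all ray tests
    return true  # only now run the expensive triangle-triangle tests
```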
As the implementation is pure brute force, some visual artifacts may rarely appear if you look closely, but the truth is that they don't have any practical impact, as CSG in Godot is mostly meant for level prototyping. In fact, I'm sure that for some types of games CSG could easily be usable in production.
### Future
The main missing feature now is the ability to export the scene to 3D modelling software (likely via GLTF2), so the process of replacing CSG art with final art can be done by a professional artist. This will be added soon.
Please test well! And remember that we make Godot with love for everyone. If you are not already, please consider [becoming our patron](https://www.patreon.com/godotengine), so we can continue doing this for you!
| |
140938
|
---
title: "GDScript progress report: Writing a new parser"
excerpt: "Showing the work for the new GDScript parser, why it is done and how it improves over the old one. Also show a bit of new features."
categories: ["progress-report"]
author: George Marques
image: /storage/app/uploads/public/5ed/43c/e24/5ed43ce245f10218039510.png
date: 2020-06-01 11:37:58
---
As you might be aware, I'm currently working on revamping the GDScript compiler. After finishing the tokenizer in the last report, I've been working on the new parser.
The main point of this rewrite is to make the parser a bit more "textbook-like", as mentioned in the previous article, so I split the grammar productions into different functions. Before, we had big `switch` statements with many cases to parse the code; now we delegate the work to specific functions. This makes it easier to find where a particular piece of code is handled.
*See other articles in this Godot 4.0 GDScript series:*
1. [GDScript progress report: Writing a tokenizer](https://godotengine.org/article/gdscript-progress-report-writing-tokenizer)
2. (you are here) [GDScript progress report: Writing a new parser](https://godotengine.org/article/gdscript-progress-report-writing-new-parser)
3. [GDScript progress report: Type checking is back](https://godotengine.org/article/gdscript-progress-report-type-checking-back)
4. [GDScript progress report: New GDScript is now merged](https://godotengine.org/article/gdscript-progress-report-new-gdscript-now-merged)
5. [GDScript progress report: Typed instructions](https://godotengine.org/article/gdscript-progress-report-typed-instructions)
6. [GDScript progress report: Feature-complete for 4.0](https://godotengine.org/article/gdscript-progress-report-feature-complete-40)
## Less lookahead
The previous parser relied on the tokenizer's ability to buffer tokens, asking for a few of the next (and sometimes the previous) tokens in order to decide what the code means. The new tokenizer lacks this ability on purpose, to make sure we use the bare minimum of lookahead to parse the code.
Minimizing lookahead means the code is easier to parse. If we introduce a new syntax for a feature, we now have to make sure it does not increase the complexity of the parser by demanding too many tokens at once.
The new parser only stores the current and previous token (or the current and the next, if you look at it another way). It means that GDScript only needs one token of lookahead to be fully parsed. This leads to a simpler language design and implementation.
## A Pratt parser for expressions
Those who are familiar with programming language implementation will know that parsing expressions is always tricky. When you consider the precedence of operators, and that an arbitrary expression can be the left operand of, say, a `+` operator, it can get very tricky to decide how to build the parse tree.
There are different solutions to this, but I like [Pratt parsing](https://journal.stuffwithstuff.com/2011/03/19/pratt-parsers-expression-parsing-made-easy/) for its simplicity. It consists of a function table in which the index is the token type. This means that deciding which parser function to call is just a lookup based on the current token.

This makes it very easy to see—and change—the precedence of every operator. The functions do have to share the same signature, but that's usually not a big deal since most of them aren't called outside the expression parser.
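To make the idea concrete, here is a toy, GDScript-flavored sketch of precedence-driven expression parsing over pre-split tokens (the actual parser is C++ and far more complete):
```gdscript
extends Node

var tokens := []
var pos := 0

func parse(p_tokens: Array):
    tokens = p_tokens
    pos = 0
    return _expression(0)

func _expression(min_prec: int):
    # Prefix step: literals are the only prefix rule in this toy grammar.
    var left = tokens[pos]
    pos += 1
    # Infix step: keep folding while the next operator binds tighter.
    while pos < tokens.size() and _precedence(tokens[pos]) > min_prec:
        var op = tokens[pos]
        pos += 1
        left = [op, left, _expression(_precedence(op))]  # build a small tree
    return left

func _precedence(token) -> int:
    if token == "+":
        return 1
    if token == "*":
        return 2
    return 0  # not an operator

func _ready():
    # "1 + 2 * 3" parses as ["+", 1, ["*", 2, 3]] thanks to precedence.
    print(parse([1, "+", 2, "*", 3]))
```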
## Multiple error detection
Also mentioned in the previous article, the idea is to show multiple errors at once, so you don't need to fix one to discover the next. The new parser has support for storing an arbitrary number of errors.
When it finds something unexpected, the parser enters *"panic mode"*. While in this mode, it will ignore every token until it finds one that can only be the beginning of a statement. At this point, it can leave panic mode and go back to regular parsing. While this can cause cascading errors due to missing or extra tokens, it can show errors in different parts of the file at once.
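A minimal sketch of the recovery step (the set of statement-starting tokens here is illustrative, not the parser's real list):
```gdscript
const STATEMENT_STARTERS := ["func", "var", "if", "for", "while", "return"]

# Skip tokens until one that can begin a statement, then resume parsing.
func recover(tokens: Array, pos: int) -> int:
    while pos < tokens.size() and not (tokens[pos] in STATEMENT_STARTERS):
        pos += 1
    return pos
```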
This will also help code completion, since the parser won't need to stop on the first error and can check what comes next in the file to offer suggestions.
## Pretty tree printer
As I did with the tokenizer, I added a print mode to replace the old `gd_parser` test. It prints the tree in an unambiguous, readable way, so parsing problems can be detected.
So a script like this:
```gdscript
class_name MyClass extends Node2D
const SPEED = 200
@export var player_name = "Vladimir"
func _ready():
    $Button.button_up.connect(_on_button_pressed)
    print("Ready!")

func _on_button_pressed():
    print("Player is \"%s\"" % player_name)

func _process(delta):
    $Body.move(delta * SPEED)
```
Will be printed like this:
```
Class MyClass Extends Node2D :
| Constant SPEED
| | = 200
| @export ()
| Variable player_name
| | = "Vladimir"
| Function _ready( ) :
| | $Button.button_up.connect( _on_button_pressed )
| | print( "Ready!" )
| Function _on_button_pressed( ) :
| | print( ("Player is \"%s\"" % player_name) )
| Function _process( delta ) :
| | $Body.move( (delta * SPEED) )
```
## Annotations
Shown briefly in the previous example, I implemented annotations as outlined in [GIP #828](https://github.com/godotengine/godot-proposals/issues/828). These are meant to replace a few keywords and improve the ability to add new integrations without creating new keywords and big changes in the parser.
This also means that the old `export` syntax is now extinct, in favor of specific annotations. This makes them in general easier to remember and understand, and they don't require much effort in the GDScript implementation, since their arguments are the same as expected in the internal hint strings.
There will be an [update to the documentation](https://github.com/godotengine/godot-docs/pull/3623) to explain the new way.
## `await` replaces `yield`
The old `yield` syntax was a bit convoluted and complicated to understand, forcing you to deal with function states. It was especially hard to understand given that it did the opposite of what other languages do with the same keyword.
The `await` syntax can be used to wait for signals or coroutines alike:
```gdscript
func coroutine():
    await $Button.button_up # Will suspend the function and resume when the button is pressed.
    return true

func _ready():
    var result = await coroutine() # Will suspend the function and wait for the coroutine result.
    print(result) # true
```
## Breaking things to make them anew
I wanted to get this new parser working with the current system sooner rather than later, so I changed the dependencies on the old parser and tokenizer to use the new ones. Since this is rather time-consuming, a lot of things were disabled in the meantime.
I do mention what's missing in my [initial Pull Request](https://github.com/godotengine/godot/pull/39093), but here is a shortlist:
* Type checking.
* Code completion.
* Warnings.
* Language server.
* `setget` (will be replaced by [properties](https://github.com/godotengine/godot-proposals/issues/844)).
* Some optimizations.
This will be fixed in the next steps.
## Future
There's still a long road ahead to make GDScript a better language in general. The next step is to redo the type checking, so the systems that depend on it can also be reactivated. Hopefully the new type checker will be more precise than the previous one (I learned a lot since then). And it will allow the use of some new features like typed arrays.
| |
140957
|
- 2D: Fix UV editor not using texture transform ([GH-84076](https://github.com/godotengine/godot/pull/84076)).
- 2D: Fix generating terrain icon with certain image formats ([GH-84507](https://github.com/godotengine/godot/pull/84507)).
- 2D: Keep scene tiles even if the TileMap is invisible ([GH-85753](https://github.com/godotengine/godot/pull/85753)).
- 3D: Only allow MeshInstance3D-inherited nodes in MultiMesh Populate Surface dialog ([GH-84933](https://github.com/godotengine/godot/pull/84933)).
- Animation: Fix imported track flag on sliced animations ([GH-85061](https://github.com/godotengine/godot/pull/85061)).
- Animation: Prevent a crash when calling `AnimationMixer::restore` with an invalid resource ([GH-85428](https://github.com/godotengine/godot/pull/85428)).
- Animation: Fix AnimationPlayer seeking for Discrete keys ([GH-85569](https://github.com/godotengine/godot/pull/85569)).
- Animation: Fix Tween loop initial value ([GH-85681](https://github.com/godotengine/godot/pull/85681)).
- Audio: Fix importing WAV files with odd chunk sizes ([GH-85556](https://github.com/godotengine/godot/pull/85556)).
- Buildsystem: Use Python venv if detected when building VS project ([GH-84593](https://github.com/godotengine/godot/pull/84593)).
- Buildsystem: Fix invalid Python escape sequences ([GH-85818](https://github.com/godotengine/godot/pull/85818)).
- Core: Set language encoding flag when using `ZIPPacker` ([GH-78732](https://github.com/godotengine/godot/pull/78732)).
- Core: Fix crash when hashing empty `CharString` ([GH-85389](https://github.com/godotengine/godot/pull/85389)).
- Core: Prevent infinite recursion when printing errors ([GH-85397](https://github.com/godotengine/godot/pull/85397)).
- Core: Fix property groups overriding real properties ([GH-85486](https://github.com/godotengine/godot/pull/85486)).
- Core: Do not reload resources and send notification if locale is not changed ([GH-85787](https://github.com/godotengine/godot/pull/85787)).
- Editor: Remove exp hint of a few properties ([GH-80326](https://github.com/godotengine/godot/pull/80326)).
- Editor: Fix UV editor not showing polygon correctly ([GH-84116](https://github.com/godotengine/godot/pull/84116)).
- Editor: Inspector: Fix clearing array/dictionary element with `<Object#null>` ([GH-84237](https://github.com/godotengine/godot/pull/84237)).
- Editor: Allow dragging editable children ([GH-84310](https://github.com/godotengine/godot/pull/84310)).
- Editor: Fix errors on file rename or move in the Filesystem Dock ([GH-84520](https://github.com/godotengine/godot/pull/84520)).
- Editor: Fix issue with 3D scene drag and drop preview node ([GH-85087](https://github.com/godotengine/godot/pull/85087)).
- Editor: Fix SnapGrid being almost invisible in light theme ([GH-85585](https://github.com/godotengine/godot/pull/85585)).
- Editor: Fix theme application in various editor dialogs ([GH-85745](https://github.com/godotengine/godot/pull/85745)).
- Export: Fix order of operations for macOS template check ([GH-84990](https://github.com/godotengine/godot/pull/84990)).
- **Export: iOS: Use `mdfind` to check if Xcode is installed in one-click deploy code ([GH-85774](https://github.com/godotengine/godot/pull/85774)).**
* It's not obvious from the commit, but this should fix freezes that some macOS users have been experiencing when opening Godot if they don't have Xcode installed.
- GDExtension: Fix updating cached singletons when reloading GDScripts ([GH-85373](https://github.com/godotengine/godot/pull/85373)).
- **GDExtension: Fix crash when using incompatible versions of Godot Jolt ([GH-85779](https://github.com/godotengine/godot/pull/85779)).**
* We knew that old versions of Jolt were crashing in Godot 4.2, but we underestimated how widely used Jolt is, and the fact that it's difficult for users to keep their addons up-to-date with the current Asset Library. So we added a small hack to prevent loading older Jolt versions to avoid that crash. To use Jolt in Godot 4.2, be sure to update it to [0.11.0-stable](https://github.com/godot-jolt/godot-jolt/releases/tag/v0.11.0-stable).
- GDScript: Improve autocompletion with `get_node` ([GH-79386](https://github.com/godotengine/godot/pull/79386)).
- GDScript: Filter groups and categories from autocompletion ([GH-85196](https://github.com/godotengine/godot/pull/85196)).
- GUI: Enable scrolling of output with UI scale changes ([GH-82079](https://github.com/godotengine/godot/pull/82079)).
- GUI: VideoPlayer: Fix reloading translation remapped stream ([GH-84794](https://github.com/godotengine/godot/pull/84794)).
- GUI: Restored Control properties when you undo a parenting of a Control to a Container ([GH-85181](https://github.com/godotengine/godot/pull/85181)).
- GUI: Make sure `Window`'s title is respected before we compute the size ([GH-85312](https://github.com/godotengine/godot/pull/85312)).
- GUI: RTL: Fix CharFX character offset calculation ([GH-85363](https://github.com/godotengine/godot/pull/85363)).
- GUI: Limit window size updates on title change ([GH-85542](https://github.com/godotengine/godot/pull/85542)).
- GUI: Fix size and visuals of the `InputEventConfigurationDialog` ([GH-85790](https://github.com/godotengine/godot/pull/85790)).
- GUI: Limit window size updates on title translation change ([GH-85828](https://github.com/godotengine/godot/pull/85828)).
- Import: Fix memory leak on error paths in tinyexr loader ([GH-85002](https://github.com/godotengine/godot/pull/85002)).
- Import: Fix memory corruption and assert failures in convex decomposition ([GH-85631](https://github.com/godotengine/godot/pull/85631)).
- Input: Fix SubViewport physics picking ([GH-85665](https://github.com/godotengine/godot/pull/85665)).
- Navigation: Fix missing NavigationLink property updates in constructor ([GH-83802](https://github.com/godotengine/godot/pull/83802)).
- Navigation: Fix missing NavigationRegion property updates in constructor ([GH-83812](https://github.com/godotengine/godot/pull/83812)).
- Navigation: Fix missing NavigationAgent property updates in constructor ([GH-83814](https://github.com/godotengine/godot/pull/83814)).
- Navigation: Fix missing NavigationObstacle property updates in constructor ([GH-83816](https://github.com/godotengine/godot/pull/83816)).
- Navigation: Fix memory leak in 'NavigationServer3D' involving static obstacles ([GH-84816](https://github.com/godotengine/godot/pull/84816)).
- Navigation: Fix NavigationRegion2D transform update ([GH-85258](https://github.com/godotengine/godot/pull/85258)).
- Particles: Only allow MeshInstance3D-based nodes in particles emission shape node selector ([GH-84891](https://github.com/godotengine/godot/pull/84891)).
- Plugin: Correctly check scripts that must inherit `EditorPlugin` ([GH-85271](https://github.com/godotengine/godot/pull/85271)).
-
| |
140975
|
---
title: "Multiplayer in Godot 4.0: Scene Replication"
excerpt: "Create multiplayer games in an instance (pun intended) with the new MultiplayerSpawner and MultiplayerSynchronizer nodes.
Check out the key concepts, and get started with a quick tutorial on how to make a simple game using Godot multiplayer features!"
categories: ["progress-report"]
author: Fabio Alessandrelli
image: /storage/blog/covers/multiplayer-in-godot-4-0-scene-replication.jpg
date: 2023-02-23 11:30:00
---
Howdy Godotters! It's been more than a year since I last wrote on this blog, "... blah blah, blah blah, blah" (cf. [C. L. C. Chuckie](https://en.wikipedia.org/wiki/LeChuck)), and with Godot 4.0 nearing release, it's time to write something to jumpstart you into creating your own multiplayer experience with Godot.
Since the [last blog post](/article/multiplayer-changes-godot-4-0-report-4), we rewrote the scene replication API to be more user-friendly, adding two "configuration" nodes:
- The [`MultiplayerSpawner`](https://docs.godotengine.org/en/latest/classes/class_multiplayerspawner.html) node to configure where nodes can be remotely instantiated by which peer.
- The [`MultiplayerSynchronizer`](https://docs.godotengine.org/en/latest/classes/class_multiplayersynchronizer.html) node to configure which node properties can be synchronized by which peer.
So, without further ado, let's see how to create a simple multiplayer game using these new features (jump to the bottom for the full project).
## Scene setup
First of all, let's set up our multiplayer scene:

And wire up the signals to start a server or client:
```
# multiplayer.gd
extends Node
const PORT = 4433
func _ready():
    # Start paused.
    get_tree().paused = true
    # You can save bandwidth by disabling server relay and peer notifications.
    multiplayer.server_relay = false

    # Automatically start the server in headless mode.
    if DisplayServer.get_name() == "headless":
        print("Automatically starting dedicated server.")
        _on_host_pressed.call_deferred()

func _on_host_pressed():
    # Start as server.
    var peer = ENetMultiplayerPeer.new()
    peer.create_server(PORT)
    if peer.get_connection_status() == MultiplayerPeer.CONNECTION_DISCONNECTED:
        OS.alert("Failed to start multiplayer server.")
        return
    multiplayer.multiplayer_peer = peer
    start_game()

func _on_connect_pressed():
    # Start as client.
    var txt : String = $UI/Net/Options/Remote.text
    if txt == "":
        OS.alert("Need a remote to connect to.")
        return
    var peer = ENetMultiplayerPeer.new()
    peer.create_client(txt, PORT)
    if peer.get_connection_status() == MultiplayerPeer.CONNECTION_DISCONNECTED:
        OS.alert("Failed to start multiplayer client.")
        return
    multiplayer.multiplayer_peer = peer
    start_game()

func start_game():
    # Hide the UI and unpause to start the game.
    $UI.hide()
    get_tree().paused = false
```
Then let's set up our world scene with some physics objects, and add it to the multiplayer scene:

## Synchronizing properties
So, now that we have our scene set up and peers can connect, let's add a [MultiplayerSynchronizer](https://docs.godotengine.org/en/latest/classes/class_multiplayersynchronizer.html) to our object scene and configure it to sync some of its properties.
Synchronized properties can be configured by selecting them from a list or entering their path.

Running the game now, you will notice that the objects start synchronizing as soon as the client connects.
*Tip:* You can run and debug multiple instances simultaneously from the editor by changing the value in `Debug -> Run Multiple Instances`.

## Spawning and despawning scenes
The [MultiplayerSpawner](https://docs.godotengine.org/en/latest/classes/class_multiplayerspawner.html) node automates the process of replicating dynamically instantiated nodes across peers, including when joining mid-game or reconnecting.
This mostly boils down to the following MultiplayerSpawner properties:
- `spawn_path`: Defines the path where the spawner will add the nodes it instantiates.
- `Auto Spawn List`: Defines the scenes to be automatically replicated when added as a child of `spawn_path` by the authority (server by default).
- `spawn_function`: (Optional) Defines a function to be called on all peers when using the `spawn` custom instantiation method.
You can use the [`set_multiplayer_authority()`](https://docs.godotengine.org/en/latest/classes/class_node.html#class-node-method-set-multiplayer-authority) method to control which peer is allowed to instantiate scenes via the spawner (the server by default).
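For dynamically created scenes that need custom setup, the optional `spawn_function` can be paired with the spawner's `spawn()` method. Here is a minimal sketch (the bullet scene and the data layout are assumptions of this example):
```
func _ready():
    $MultiplayerSpawner.spawn_function = _spawn_bullet

func _spawn_bullet(data):
    var bullet = preload("res://bullet.tscn").instantiate()
    bullet.position = data["position"]
    return bullet  # the spawner adds it under spawn_path on every peer

# On the authority:
# $MultiplayerSpawner.spawn({"position": Vector3(0, 1, 0)})
```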
## Selecting levels
Currently, using `get_tree().change_scene_to_packed()` (or `change_scene_to_file()`) during a multiplayer session can be problematic when players join mid-game or re-join a match. While we are working on supporting the `change_scene*` methods out of the box in future Godot releases, it is currently possible to simulate the behavior using a MultiplayerSpawner to spawn the level scene.
This ensures that if our game needs to change the active level, the appropriate one will also be instantiated on connected clients as well as on newly connected ones.
To do that, let's remove our static world from the multiplayer scene, add a MultiplayerSpawner node, and configure it with our level(s) in the "Auto Spawn List".

Now let's add a `change_level()` function which instantiates the (selected) level and call it from the `start_game()` function.
```
func start_game():
    # Hide the UI and unpause to start the game.
    $UI.hide()
    get_tree().paused = false
    # Only change level on the server.
    # Clients will instantiate the level via the spawner.
    if multiplayer.is_server():
        change_level.call_deferred(load("res://level.tscn"))

# Call this function deferred and only on the main authority (server).
func change_level(scene: PackedScene):
    # Remove old level if any.
    var level = $Level
    for c in level.get_children():
        level.remove_child(c)
        c.queue_free()
    # Add new level.
    level.add_child(scene.instantiate())

# The server can restart the level by pressing Home.
func _input(event):
    if not multiplayer.is_server():
        return
    if event.is_action("ui_home") and Input.is_action_just_pressed("ui_home"):
        change_level.call_deferred(load("res://level.tscn"))
Now the level will be instantiated by the server as soon as it starts, and clients will instantiate it as soon as they connect.
The server can also call `change_level()` at any moment to restart the current level, or to select a different one (as long as the selected level is in the "Auto Spawn List" of all peers).
Additionally, when scenes controlled by a multiplayer spawner contain a multiplayer synchronizer referencing the root node of the scene, the configured "Spawn" properties will be automatically set on remote peers during the spawning process.
Similarly to what we did in the multiplayer scene, we could add one or more MultiplayerSpawner to the level scene to replicate nodes that have a dynamic lifecycle like bullets, powerups, etc.
*Tip:* You can further customize the number of nodes that can be spawned remotely at any given time via the `spawn_limit` property. In this case we can set `spawn_limit = 1` since only one level is allowed to be active at any given time.
| |
140976
|
## Spawning players
For the player characters, we usually need to transfer part of the authority to the peer that each character represents.
In this scenario, it is good practice to use a child node dedicated to the player inputs, and leave the multiplayer authority of the character nodes to the server. This helps maintain proper isolation between controls and game logic, making the setup less error-prone.
Keeping that in mind, let's create the player scene with a synchronizer for the character itself, and a synchronizer for the player input.

We then attach a script to the player input synchronizer and configure it to gather the local input based on the configured authority.
```
# player_input.gd
extends MultiplayerSynchronizer
# Set via RPC to simulate is_action_just_pressed.
@export var jumping := false
# Synchronized property.
@export var direction := Vector2()
func _ready():
    # Only process for the local player.
    set_process(get_multiplayer_authority() == multiplayer.get_unique_id())

@rpc("call_local")
func jump():
    jumping = true

func _process(delta):
    # Get the input direction and handle the movement/deceleration.
    # As good practice, you should replace UI actions with custom gameplay actions.
    direction = Input.get_vector("ui_left", "ui_right", "ui_up", "ui_down")
    if Input.is_action_just_pressed("ui_accept"):
        jump.rpc()
```
We handle jumping with a reliable RPC (we don't want to miss the player jumping action!), while we use the synchronizer itself to constantly sync the direction the user wants to go.
We can then move the player character on the main authority by reading the synchronized input state:
```
# player.gd
extends CharacterBody3D
const SPEED = 5.0
const JUMP_VELOCITY = 4.5
# Get the gravity from the project settings to be synced with RigidBody nodes.
var gravity = ProjectSettings.get_setting("physics/3d/default_gravity")
# Set by the authority, synchronized on spawn.
@export var player := 1 :
    set(id):
        player = id
        # Give authority over the player input to the appropriate peer.
        $PlayerInput.set_multiplayer_authority(id)

# Player synchronized input.
@onready var input = $PlayerInput

func _ready():
    # Set the camera as current if we are this player.
    if player == multiplayer.get_unique_id():
        $Camera3D.current = true
    # Only process on server.
    # EDIT: Let the client simulate player movement too to compensate for network input latency.
    # set_physics_process(multiplayer.is_server())

func _physics_process(delta):
    # Add the gravity.
    if not is_on_floor():
        velocity.y -= gravity * delta

    # Handle jump.
    if input.jumping and is_on_floor():
        velocity.y = JUMP_VELOCITY

    # Reset jump state.
    input.jumping = false

    # Handle movement.
    var direction = (transform.basis * Vector3(input.direction.x, 0, input.direction.y)).normalized()
    if direction:
        velocity.x = direction.x * SPEED
        velocity.z = direction.z * SPEED
    else:
        velocity.x = move_toward(velocity.x, 0, SPEED)
        velocity.z = move_toward(velocity.z, 0, SPEED)

    move_and_slide()
```
We will then use the server synchronizer to keep the position and velocity in sync and to set the player id on spawn (which will in turn configure the appropriate input authority).

Finally, we can add a multiplayer spawner to our level, and use it to spawn players as they connect.

```
# level.gd
extends Node3D
const SPAWN_RANDOM := 5.0
func _ready():
    # We only need to spawn players on the server.
    if not multiplayer.is_server():
        return

    multiplayer.peer_connected.connect(add_player)
    multiplayer.peer_disconnected.connect(del_player)

    # Spawn already connected players.
    for id in multiplayer.get_peers():
        add_player(id)

    # Spawn the local player unless this is a dedicated server export.
    if not OS.has_feature("dedicated_server"):
        add_player(1)

func _exit_tree():
    if not multiplayer.is_server():
        return
    multiplayer.peer_connected.disconnect(add_player)
    multiplayer.peer_disconnected.disconnect(del_player)

func add_player(id: int):
    var character = preload("res://player.tscn").instantiate()
    # Set player id.
    character.player = id
    # Randomize character position.
    var pos := Vector2.from_angle(randf() * 2 * PI)
    character.position = Vector3(pos.x * SPAWN_RANDOM * randf(), 0, pos.y * SPAWN_RANDOM * randf())
    character.name = str(id)
    $Players.add_child(character, true)

func del_player(id: int):
    if not $Players.has_node(str(id)):
        return
    $Players.get_node(str(id)).queue_free()
```
We can now run the game and test that connecting to a host will correctly spawn the appropriate level and players, give us control of our character, and properly synchronize the object positions.

Here is the [full project source](/storage/blog/multiplayer-in-godot-4-0-scene-replication/project.zip), compatible with current 4.0 RC releases.
And that's it for this long-awaited introductory post on the new replication system. More advanced topics, like bandwidth optimizations, spawning customized scenes, and the visibility system, will be discussed in a separate tutorial.
## Reference work
Design proposals: [#3359](https://github.com/godotengine/godot-proposals/issues/3359) and [#3459](https://github.com/godotengine/godot-proposals/issues/3459).
Initial implementation: [#55950](https://github.com/godotengine/godot/pull/55950).
And yes, as the top image hints we are adding multiplayer features to the TPS demo, so as always stay tuned for more ;).
| |
140988
|
---
title: "Godot 3.2 will get pseudo 3D support in 2D engine"
excerpt: "Godot support for 2D is already mature and most of our users enjoy working with it. There is, however, a growing trend of adding 3D layers to 2D games, which can be seen in successful titles such as Hollow Knight or Rayman Origins."
categories: ["progress-report"]
author: Juan Linietsky
image: /storage/app/uploads/public/5ca/776/3d9/5ca7763d99acc514180027.png
date: 2019-04-05 00:00:00
---
# Pseudo 3D support
Godot support for 2D is already mature and most of our users enjoy working with it. There is, however, a growing trend of adding 3D layers to 2D games, which can be seen in successful titles such as [Hollow Knight](https://www.youtube.com/watch?v=nvzUzQbkikY) or [Rayman Origins](https://www.youtube.com/watch?v=_umLnGZZBrg).
This is possible because the engines in which such games are made are actual 3D engines using 2D planes. In the end, doing this is possible, but it requires understanding and being familiar with how 3D engines work.
#### Same technique in Godot
This is also possible in Godot using the 3D engine, via nodes such as [Sprite3D](https://docs.godotengine.org/en/3.1/classes/class_sprite3d.html), but the truth is that this technique requires the user to do more work and understand more about how 3D space functions.
Parallax (via [ParallaxBackground](https://docs.godotengine.org/en/3.0/classes/class_parallaxbackground.html)) works to some extent, but it's aimed more at far-away backgrounds than at adding depth to the gameplay area.
Because of this, a new way to implement pseudo 3D was added to the 2D engine, so the same results can be obtained with pure 2D programming, making it much easier to develop these types of games while using all the existing 2D tools (and assets) Godot provides.
### Canvas layers
Godot's 2D engine already has a node named [CanvasLayer](https://docs.godotengine.org/en/3.0/tutorials/2d/canvas_layers.html). Everything that is a child or grandchild of it will render on this layer. The layer index can be specified as a number (and it's also tree-ordered) to control depth placement, so no real Z positioning is required.
Using this node is a common practice to make user interfaces, because they won't scroll together with the rest of the Viewport, as layers can move independently.
Yet, moving together with the viewport may actually be desired, so a new option was added: "Follow Viewport". Toggling it on will ensure that this layer will move together with the actual viewport when using a [Camera2D](https://docs.godotengine.org/en/3.1/classes/class_camera2d.html).
### Adding depth
Together with *Follow Viewport*, there is a new "Scale" property. This value allows scaling the canvas while following the viewport. It's a simple setting but it can be used to do "Pseudo 3D" with ease:
<iframe width="560" height="315" src="https://www.youtube.com/embed/CWZvPZ5mGmY" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
In practice, just create your level using several CanvasLayer nodes. Edit them together as if you were using an *Orthogonal* view in 3D:

In the above screenshots, a few CanvasLayer nodes were created. Afterwards, the TileMap from the 2D Platformer demo was brute-duplicated and put in every single one of them.
Finally, the "Follow Viewport" option was enabled together with a scale value. This enables the pseudo 3D effect in each layer.
### Previewing without running the game
Previewing the effect in the editor is very easy: just use the new "Preview Canvas Scale" option in the 2D view menu:
<iframe width="560" height="315" src="https://www.youtube.com/embed/CE1GIakmHR0" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
### Limitations
The main limitation of this technique is that it's not possible to move a single object across multiple layers, as they will definitely need to be separate ones (none of the games mentioned above do this, though). This can still be worked around with a [RemoteTransform2D](https://docs.godotengine.org/en/3.1/classes/class_remotetransform2d.html) node, though it requires a bit more work.
### Future
This is an experimental feature, so don't forget to give feedback! Also, if you are not one, please consider [becoming our patron](https://www.patreon.com/godotengine), to help accelerate Godot development.
| |
141036
|
---
title: "Introducing C# in Godot"
excerpt: "The next alpha release of Godot 3.0 is about to be published and it will be the first version that ships with C# support. This post gives an introduction to C# scripting in Godot and how to use it in your project."
categories: ["progress-report"]
author: Ignacio Roldán Etcheverry
image: /storage/app/uploads/public/59e/b96/e71/59eb96e712765766078430.png
date: 2017-10-21 19:19:02
---
Alpha2 is around the corner and I'm glad to announce that it will come with the first usable version of C# as a Godot scripting language.
It's still at an early stage and there is a lot of work ahead, but it's a good opportunity for users to have a first look at it and give feedback.
## Acknowledgements
We did not communicate much about it until the C# support was ready for broader testing in the master branch, but Juan and I have been working on this feature as contractors for the Godot project (I as an intern and Juan as mentor/advisor).
Indeed, when we decided to implement Mono/C# as a scripting language in Godot over a year ago, we reached out to Microsoft with Miguel de Icaza's support to see if they would consider funding this work with a donation. They did, and I'm glad to announce that we received a $24,000 donation from Microsoft, which we used to fund my and Juan's work via our non-profit charity Software Freedom Conservancy.
On behalf of the Godot team, I would like to take this opportunity to thank again Microsoft and Miguel de Icaza for their huge generosity in funding our work on this project.
## Introduction
In order to bring C# programming to Godot, we are [embedding](http://www.mono-project.com/docs/advanced/embedding/) the Mono runtime into the engine. As of alpha2, Godot is using Mono 5.2 and [C# 7.0](https://blogs.msdn.microsoft.com/dotnet/2017/03/09/new-features-in-c-7-0/) is supported.
I will write more posts about the internals and how things work in the future but, for this one, I would like to focus on introducing the language and how to write Godot scripts with it.
## From GDScript to C#
The following will be a short tutorial explaining the basics of C# programming in Godot and some of the differences you can expect when coming from a language like GDScript.
### Scripts and classes
The first thing you must know is how to declare the script class. Unlike GDScript, in C# you must declare your script class explicitly. A file can have many classes, but the script class must have the same name as its file (case-sensitive) in order for Godot to detect it:
```cs
// Coin.cs
using Godot; // Namespace that contains all Godot types
// Class Coin has same name as its file. Godot will detect it
public class Coin : Node
{
    public override void _Ready()
    {
        GD.print("Hello, Godot!");
    }
}
```
The above example would be the equivalent to GDScript's:
```gdscript
extends Node
func _ready():
print("Hello, Godot!")
```
As you can see, the C# API's naming convention uses PascalCase _(Note: As of alpha2, there are still a few names in snake\_case. Renaming will be finished for the next alpha or beta release)_. Global-scope methods and constants are available in the `GD` class, except those that are math-related, which are located in the `Mathf` class.
### Static typing
Let's check the following code in GDScript:
```gdscript
var player = get_node("player")
# player is an instance of type KinematicBody2D
var on_floor = player.is_on_floor()
```
If you try the following in C#, it will fail to compile:
```cs
Node player = GetNode("player");
// Error: 'Node' does not contain a definition for 'IsOnFloor'
bool onFloor = player.IsOnFloor();
```
This happens because C# is statically typed. `GetNode` can return an instance of any type that derives from `Node`, but `Node` itself does not declare the method `IsOnFloor`. You must cast the instance to `KinematicBody2D` in order to use that method:
```cs
KinematicBody2D player = (KinematicBody2D)GetNode("player");
// or a shorter way, thanks to type inference
var player = (KinematicBody2D)GetNode("player");
bool onFloor = player.IsOnFloor(); // Compiles
```
Note that the previous cast will throw an exception if the returned instance is not of type `KinematicBody2D`. If you prefer, you can use the `as` keyword, which will return `null` instead:
```cs
var scene = ResourceLoader.Load("res://player.tscn") as PackedScene;
if (scene != null)
    AddChild(scene.Instance());
else
    GD.print("Not of type PackedScene");
```
Another way to check whether an instance is of a specific type is by using the `is` keyword:
```cs
public override void _Input(InputEvent ev)
{
    if (ev is InputEventMouse)
    {
        var mouseEvent = (InputEventMouse)ev;
        Vector2 mousePos = mouseEvent.Position;
    }
    else if (ev is InputEventKey)
    {
        var keyEvent = (InputEventKey)ev;
        bool aPressed = keyEvent.Pressed && keyEvent.Scancode == GD.KEY_A;
    }
}
```
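Since C# 7.0 is supported, the check and the cast can also be combined into one step with pattern matching:
```cs
public override void _Input(InputEvent ev)
{
    if (ev is InputEventMouse mouseEvent)
    {
        Vector2 mousePos = mouseEvent.Position;
    }
    else if (ev is InputEventKey keyEvent)
    {
        bool aPressed = keyEvent.Pressed && keyEvent.Scancode == GD.KEY_A;
    }
}
```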
### Other differences
There are many more examples to mention; for the beta release, we will have a page in the documentation with a list. Here are a few more that I deem important:
- The default constructor for `Basis`, `Transform2D` and `Quat` initializes all fields to the default value. If you want the same initial value as in GDScript, you can use `Basis.Identity`, `Transform2D.Identity` and `Quat.Identity`.
- A few methods of the API return `Variant`. This means they can return anything. In C# those methods return `object` (`System.Object`).
| |
141167
|
# Downloads
{% include articles/download_card.html version="4.3" release="rc1" article=page %}
**Standard build** includes support for GDScript and GDExtension.
**.NET build** (marked as `mono`) includes support for C#, as well as GDScript and GDExtension.
- See also [C# platform support](https://docs.godotengine.org/en/latest/tutorials/scripting/c_sharp/index.html#c-platform-support).
If you want to test the new Windows ARM64 builds, they're not integrated in our download page yet, so here are direct links:
- [Editor for Windows ARM64 (Standard)](https://github.com/godotengine/godot-builds/releases/download/4.3-rc1/Godot_v4.3-rc1_windows_arm64.exe.zip)
- [Editor for Windows ARM64 (.NET)](https://github.com/godotengine/godot-builds/releases/download/4.3-rc1/Godot_v4.3-rc1_mono_windows_arm64.zip)
{% include articles/prerelease_notice.html %}
## Known issues
During the Release Candidate stage, we focus exclusively on solving showstopping regressions (i.e. something that worked in a previous release is now broken, without workaround). You can have a look at our current [list of regressions and significant issues](https://github.com/orgs/godotengine/projects/61) which we aim to address before releasing 4.3. This list is dynamic and will be updated if we discover new blocking issues after more users start testing the RC snapshots.
With every release, we are aware that there are going to be various issues which have already been reported but haven't been fixed yet, due to limited resources. See the GitHub issue tracker for a complete list of [known bugs](https://github.com/godotengine/godot/issues?q=is%3Aissue+is%3Aopen+label%3Abug+).
## Bug reports
As a tester, we encourage you to [open bug reports](https://github.com/godotengine/godot/issues) if you experience issues with this release. Please check the [existing issues on GitHub](https://github.com/godotengine/godot/issues) first, using the search function with relevant keywords, to ensure that the bug you experience is not already known.
In particular, any change that would cause a regression in your projects is very important to report (e.g. something that worked fine in previous 4.x releases but no longer works in this snapshot).
## Support
Godot is a non-profit, open source game engine developed by hundreds of contributors in their free time, as well as a handful of part-time or full-time developers hired thanks to [generous donations from the Godot community](https://fund.godotengine.org/). A big thank you to everyone who has contributed [their time](https://github.com/godotengine/godot/blob/master/AUTHORS.md) or [their financial support](https://github.com/godotengine/godot/blob/master/DONORS.md) to the project!
If you'd like to support the project financially and help us secure our future hires, you can do so using the [Godot Development Fund](https://fund.godotengine.org/) platform managed by [Godot Foundation](https://godot.foundation/). There are also several [alternative ways to donate](/donate) which you may find more suitable.
| |
141174
|
---
title: "Optional typing in GDScript"
excerpt: "Exposing the new addition to GDScript: optional type hints and all the perks it brings."
categories: ["progress-report"]
author: George Marques
image: /storage/app/uploads/public/5b1/f4f/b2f/5b1f4fb2f023a968578982.png
date: 2018-07-22 17:11:27
---
While GDScript is made with ease of use in mind, many people wanted to add type information to their scripts. This helps avoid potential bugs and also allows for better code completion. We're now introducing an additional syntax to add type hints to your GDScript code. It is completely optional and old scripts will work as they always did.
**Note:** This is a new feature in the *master* branch, and will be available in Godot 3.1.
## How can type hints help?
It is quite common to use a variable only with values of the same type. With the dynamic nature of GDScript, you can inadvertently overwrite a variable with a different type and break your code logic in a way that might be hard to notice.
For instance, if your function expects a number but you missed an input validation somewhere and are passing a string to it instead, you will only see an error at runtime (and only if you reach that point). In a larger code base, it's quite easy to miss things like that.
With type hints, Godot can know beforehand that you are passing the wrong type and show an error while you are editing the related code, even if you never run it.
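For example, a small sketch of a mismatch that the editor can flag before the game ever runs:
```
var health : int = 100

func take_damage(amount : int) -> void:
	health -= amount

func _ready():
	take_damage("10") # Flagged while editing: the argument should be an int, not a String.
```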
## Syntax
This is quite a controversial topic (like tabs vs. spaces) and everyone has their own preference that's based on their background in other languages. We adopted a post-fixed type syntax that is very similar to Python (yes, Python has type hints too). This was chosen because GDScript is already similar to Python and this style is easy to integrate in the language parser, also considering that it is still optional.
For variables and constants, you can add a type hint with a colon (`:`) after the name when declaring it. Constants don't need type hints, since their type is inferred from the assignment, but you can add one as well:
```
const GRAVITY : Vector2 = Vector2(0, 9.8)
var power : float = 150.0
```
The type can be inferred from the assigned value if you add the colon (`:`) but omit the type. This can make the code more concise:
```
var direction_vector := Vector2(1, 0)
# direction_vector is of type Vector2 because of the assigned constant
var my_sprite := $MySprite as Sprite
# my_sprite is of type Sprite because of casting
```
Functions follow the same syntax for the arguments. For the return type, you use a forward arrow (`->`) along with the type, placed before the colon that starts the function body:
```
func my_func(arg1 : int, arg2 : String = "") -> void:
	return
```
Casting types is accomplished by using the `as` keyword. For built-in types, it errors out if it's not possible to convert. For objects it simply returns `null`:
```
var number_input : int = $LineEdit.text as int
var my_sprite : Sprite = $Sprite as Sprite
```
The type can be: a built-in Variant type (`int`, `Vector3`, `Color`, `Basis`, etc.); a core class (`Node`, `Resource`, `File`, etc.); a named constant that contains a Script resource (`const MyClass = preload("res://my_class.gd")`); or a named script class (one made with the `class_name` syntax).
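For instance, a named script class can then be used directly as a type hint (a small sketch):
```
# enemy.gd
class_name Enemy
extends Node2D

# In any other script, the class name now works as a type:
var boss : Enemy = Enemy.new()
```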
## Introducing the concept of "safe lines"
As suggested by our dear core developer reduz (Juan Linietsky), GDScript now has "safe lines" marked in the editor. The reasoning behind this is that duck typing should still work as it always did, so it's not possible to raise an error when some variable or function is not defined on the accessed class.
Something very common is to tell an animation player to start like this:
```
$AnimationPlayer.play("walk")
```
With the type inference, Godot knows that `$AnimationPlayer` is a `Node` but can't tell which subtype it really is (in this case an `AnimationPlayer`). Since the `play()` function is not defined on the inferred type (Node), the engine doesn't know at this point whether it really exists, nor the types of its arguments and return value.
A statically typed language, such as C++ or C#, would force you to explicitly cast to the specific subtype, otherwise it would throw an error. So you'd need to do this:
```
($AnimationPlayer as AnimationPlayer).play("walk")
```
This is inconvenient most of the time for GDScript. Many times it's not even really needed. Instead of erroring out, the editor now shows a subtle greenish highlight in the lines that are safe according to type hints:

Some users might find this *too* subtle. This is actually the idea: it should not stand out for people who aren't really interested in it. In any case, you can change the color in the editor settings and also disable the highlight if you prefer the old full-dynamic style.
## Code completion
The type inference code to provide completion candidates and function hints was rewritten. It fixed some long-standing annoying bugs. The experience is much improved, even if you are not using the type hints.
If your code has type hints, it provides a proper completion even across scripts. This includes singletons and scripts attached to other nodes in the tree.
| |
141175
|
## How it was implemented
Since many people seem to be interested in the devblogs, I'll add a section here explaining how I did the changes to enable the optional typing in GDScript.
### Tokenizer
The first part of parsing a language is to split the code into recognizable *tokens*. The symbols (`+`, `>=`, `|`), keywords (`if`, `func`, `class`), literal values (`2`, `"string"`), and identifiers (`Vector2`, `Node`, `my_var`) are all changed to an abstraction. This helps the next phase, since it won't need to deal with text anymore, just a series of tokens. All whitespace and comments are simply ignored and don't have to be taken into consideration.
The changes in the tokenizer are minimal, only to introduce the forward arrow symbol (`->`) and the casting keyword (`as`).
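As an illustration (simplified, not the engine's literal internal representation), a declaration like `var speed := 10.0` becomes a token stream roughly like:
```
TK_PR_VAR, TK_IDENTIFIER("speed"), TK_COLON, TK_OP_ASSIGN, TK_CONSTANT(10.0), TK_NEWLINE
```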
A peculiarity of compiled scripts on export is that they are stored as a sequence of tokens. Using these new tokens in an old Godot version would cause a big problem. This is avoided by a version number that was incremented with this change, so compiled scripts from this version won't run with older export templates (that's one reason why it's important to match editor and template versions).
### Parser
The next step is to make the new syntax recognized by the parser. A parser takes the sequence of tokens and builds a tree structure from them. This tree resolves what each line is doing and also sorts out the order of operations based on their precedence.
The GDScript parser is relatively simple, so it's just a matter of understanding the small decisions it makes to know what's a function declaration, what's a function call, what's an assignment, etc. I looked into the declarations of variables, constants, and functions to enable the parser to recognize the type hints (though it would simply discard them for now). The expression-parsing routine was also changed to detect the casting operation.
Casting is a new type of node in the tree, since it'll be handled by the compiler in a particular way.
### Moving inheritance resolution
The old GDScript only resolved the inheritance when compiling the script, which only happens when you save it, and showed errors only when you tried to load it (i.e. not while you edit it). However, the parser needs this information to know the types of members declared in the parent.
I moved this code from the compiler to the parser (with all the needed adjustments). The compiler simply uses the information from the parser to avoid resolving it again. There was also another change in the compiler to fix the ordering when you are using inner classes, allowing you to reference classes later in the file without problems.
### Static type checks
Now the parser makes the tree and determines the inheritance. Then a pass on the built tree is needed to check whether the types used in the code are compatible or not. It is by far the biggest change in the code.

This is done by looking into each statement and applying type resolution and type checks depending on the kind of statement. E.g. if it's an assignment, check whether the expression type matches the variable being assigned. This also requires knowing the base types of the objects, which can be solved now that the inheritance is resolved in the parser itself.
Another required change is having autoload singletons loaded in the editor. This way the parser can look at them as well when trying to figure out the types. That change was added earlier with another pull request.
### New typed instructions
After the parser is done, changes are needed in the compiler and the GDScript bytecode to support the type information. GDScript bytecode can be seen as a kind of "machine language" that runs on top of the engine core. The compiler is responsible for taking the parse tree and creating this lower-level code.
The new instructions allow a runtime check of the types that could not be determined in the parser. While this might make things *slower*, it guarantees that the variables will always have the proper type. Since dealing with built-in types, core classes, and scripts is different, three new assignment instructions were created to deal with each of them. The same applies to the casting operation.
The compiler changes are required to make use of those new instructions, and also to handle the new type of parser node (for casting).
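Roughly speaking (a simplified sketch, not the literal opcode format), the three typed assignment cases map to:
```
var v : Vector2 = x    ->  ASSIGN_TYPED_BUILTIN(Vector2, v, x)
var n : Node = x       ->  ASSIGN_TYPED_NATIVE(Node, n, x)
var m : MyClass = x    ->  ASSIGN_TYPED_SCRIPT(MyClass, m, x)
```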
### Editor niceties
With the backend done, it was time to move into the editor. First, and somewhat simpler, I added a colored highlight to the type hints, to help visualize them in the code. Thanks to the work of Paul Batty ([Paulb23](https://github.com/Paulb23)), who isolated the syntax highlighter to the GDScript module, it was simpler to add this small change without affecting the core.
For the safe lines, however, it required a change in TextEdit and in the script editor to support it.
The completion code was rewritten. It was quite hacky, since it grew organically over time, so it was hard to add new features to it. The new code can make better use of the type hints and also fixes some long-standing bugs that made the completion not so pleasant to use.
The new code uses the old one only as a reference. The type inference was improved to consider some other cases (like the value returned from a function) and also to rely heavily on recursive structures, making it simpler to support indexed variables and methods.
## Future
With optional typing, it's possible to improve GDScript further in other directions. Some of the benefits may also apply to people who are not using the type hints. The ideas for the future are:
- Enable type hints for signals.
- Add a system to show warnings in the editor. This allows the game developers to catch things that are not invalid but may cause issues.
- New instructions for typed code to allow faster execution. This includes avoiding the Variant evaluator and using typed arrays as well.
- Further optimizations in the compiling stage to remove redundant instructions, reduce constant expressions to a final constant, among other things.
| |
141185
|
---
title: "Editor improvements for Godot 4.0"
excerpt: "If you are following my progress, you might have noticed that I took a two month break from rendering to work on many long standing editor improvements and features."
categories: ["progress-report"]
author: Juan Linietsky
image: /storage/app/uploads/public/606/23c/e43/60623ce430cfe148969930.jpeg
date: 2021-03-29 00:00:00
---
If you are following me on [Twitter](https://twitter.com/reduzio) (where I post my progress on different Godot features I work on), you might have noticed that I took a two month break from rendering to work on many long standing editor improvements and features.
While I am far from being the only contributor working on editor-related tasks, I put together a list of everything I have been working on for the past two months!
### Improved Subresource Editing
With the new inspector in Godot 3.1, a lot of new possibilities opened, including the ability to open sub-resources in the same inspector tab. Before this, users had to go back and forth in the list of properties of each sub-resource, which was very confusing.
The new inspector worked well, but on the visual side we never really managed to nail how to deal with sub-resources. After asking the community for help and ideas, some work was put into it, which hugely improved usability.

The final version does a bit less color shifting by default so it's a bit more homogeneous. Also, the new layout makes it much clearer where each subresource begins and ends.
### Improved and reorganized Project Settings
The Project Settings dialog has seen a makeover. The categories have been reorganized to make more sense and reduce bloat, and a new "advanced" mode has been introduced.
By default, most project settings other than the basic ones are now hidden, ensuring that new users don't feel overwhelmed by the huge amount of options and flexibility, and can learn their way through the most important customization options available.
Once confident enough, the "advanced" toggle can be enabled, which allows for editing of the rest of the settings as well as the extra customization options.

Once enabled, the "advanced" setting is remembered for the current project.
### Improved Process/Pause mode
While not entirely an editor feature, Godot 4.0 unifies the process and pause settings into a single menu. This allows disabling nodes in a tree fashion. This was one of the most user-requested features, as Godot makes it easy to hide nodes but not to disable them.

Additionally, as can be seen above, the scene tree editor shows the disabled nodes in a more grayed-out fashion.
### Preview Sun and Sky
Another much requested feature was also implemented: the ability to have a preview sun and preview sky in the 3D editor. The new dialog was created mainly with two goals:
* Allow a quick frame of reference regarding lighting when importing or editing 3D scenes stand-alone.
* Give new Godot users the ability to visualize their assets with a default set of high-quality settings, as it is a common source of confusion that freshly imported assets look too plain in Godot.

As these settings are only meant for preview, they won't be visible when running the game, but both have a quick way to create the _actual_ nodes based on these settings with just a button press at the bottom.
When either node (DirectionalLight3D or WorldEnvironment) exists in a scene, the corresponding preview setting disables automatically, ensuring consistency and ease of use.
### Default Importer Settings
Another common problem Godot users ran into was that setting default values for certain types of imported assets was confusing. This is resolved by the new "Default Importer Settings" tab in the Project Settings dialog.

The new tab allows precise customization of importer options for each type of resource. This feature was also back-ported to Godot 3 and will be available in the upcoming Godot 3.3 release.
### New 3D asset import workflow
Importing 3D assets was a hit-or-miss experience in Godot. Import options were limited to a tiny "Import" menu that attempted to do too much and fell short.
The new importer has an "Advanced" mode, which allows you to precisely customize every individual node, mesh, material and animation.

Additionally, handling of external assets was re-thought. In Godot 3.x, assets are simply saved to file by name, which can be very confusing or create chaotic situations when overwriting files.
In the new importer, this process is done via manual steps, so the user has more control over which assets are moved to external files, which paths are used, etc.

As a result, it's more obvious where everything goes and what's happening during the import process.
The new system also solves the problem of assigning external materials to replace the ones in the imported file in a very elegant way, allowing you to either make the materials that come with the asset external, or replace them with existing external ones.
### Ability to "keep" files
Often, users would prefer that Godot not import some files (like PNG, CSV, etc.) so they can deal with them manually at game run-time.
This is now possible with the "keep" option. When selected, the assets will be kept and put in the game export "as-is".

### Threaded importing
Another very common problem users face in Godot is the long time it takes to import large amounts of images. To address this, the importer has been reworked to operate using multiple threads.
This results in a performance improvement of over ten times (if you have a modern computer with multiple cores).
### Future
With the editor work done, I will now go back to working on rendering for the next month to finalize the missing bits and pieces pending in my TODO list. Afterwards, it will be time to start working towards our first Godot 4.0 alpha! And again, remember we do this out of love for you and the game development community so you can have the best possible engine we can make with the same freedom as if you made it yourself.
If you are not one already, please consider [becoming our patron](https://www.patreon.com/godotengine)!
| |
142774
|
## Important concepts and terminology
#### Prompts & Completions
The completions endpoint is the core component of the API service. This API provides access to the model's text-in, text-out interface. Users simply need to provide an input prompt containing the English text command, and the model will generate a text completion.
Here's an example of a simple prompt and completion:
> Prompt: `""" count to 5 in a for loop """`
> Completion: `for i in range(1, 6): print(i)`
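As a rough sketch of what such a call looks like with the Python SDK against an Azure resource (the endpoint, key, deployment name, and API version below are placeholders, not values from this article):
```python
import openai

# Placeholders: use your own resource name, key, and deployment.
openai.api_type = "azure"
openai.api_base = "https://YOUR_RESOURCE_NAME.openai.azure.com/"
openai.api_version = "2022-12-01"
openai.api_key = "YOUR_API_KEY"

response = openai.Completion.create(
    engine="YOUR_DEPLOYMENT_NAME",  # the name you gave the model deployment
    prompt='""" count to 5 in a for loop """',
    max_tokens=32,
)
print(response["choices"][0]["text"])
```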
#### Tokens
The Azure OpenAI Service and OpenAI Enterprise process text by breaking it down into tokens. Tokens can be words or just chunks of characters. For example, the word “hamburger” gets broken up into the tokens “ham”, “bur” and “ger”, while a short and common word like “pear” is a single token. Many tokens start with a whitespace, for example “ hello” and “ bye”.
The total number of tokens processed in a given request depends on the length of your input, output and request parameters. The quantity of tokens being processed will also affect your response latency and throughput for the models.
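If you want to count tokens ahead of time, one option is the open-source `tiktoken` library (an assumption here; it's a separate package, not part of the service itself):
```python
import tiktoken

# p50k_base is the encoding used by several GPT-3 era models.
encoding = tiktoken.get_encoding("p50k_base")
tokens = encoding.encode("hamburger")
print(len(tokens), [encoding.decode([t]) for t in tokens])
```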
#### Resources
The Azure OpenAI Service is a new product offering on Azure. You can get started with the Azure OpenAI Service the same way as any other Azure product where you [create a resource](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal), or instance of the service, in your Azure Subscription.
#### Deployments
Once you create an Azure OpenAI Service Resource, you must deploy a model before you can start making API calls and generating text. This action can be done using the Deployment APIs. These APIs allow you to specify the model you wish to use.
#### In-context learning
The models used by the Azure OpenAI Service use natural language instructions and examples provided during the generation call to identify the task being asked for and the skill required. When you use this approach, the first part of the prompt includes natural language instructions and/or examples of the specific task desired. The model then completes the task by predicting the most probable next piece of text. This technique is known as "in-context" learning. These models aren't retrained during this step but instead give predictions based on the context you include in the prompt.
There are three main approaches for in-context learning: Few-shot, one-shot and zero-shot. These approaches vary based on the amount of task-specific data that is given to the model:
**Few-shot**: In this case, a user includes several examples in the call prompt that demonstrate the expected answer format and content. The following example shows a few-shot prompt where we provide multiple examples:
```
Convert the questions to a command:

Q: Ask Constance if we need some bread
A: send-msg `find constance` Do we need some bread?
Q: Send a message to Greg to figure out if things are ready for Wednesday.
A: send-msg `find greg` Is everything ready for Wednesday?
Q: Ask Ilya if we're still having our meeting this evening
A: send-msg `find ilya` Are we still having a meeting this evening?
Q: Contact the ski store and figure out if I can get my skis fixed before I leave on Thursday
A: send-msg `find ski store` Would it be possible to get my skis fixed before I leave on Thursday?
Q: Thank Nicolas for lunch
A: send-msg `find nicolas` Thank you for lunch!
Q: Tell Constance that I won't be home before 19:30 tonight — unmovable meeting.
A: send-msg `find constance` I won't be home before 19:30 tonight. I have a meeting I can't move.
Q: Tell John that I need to book an appointment at 10:30
A:
```
The number of examples typically ranges from 0 to 100 depending on how many can fit in the maximum input length for a single prompt. Maximum input length can vary depending on the specific models you use. Few-shot learning enables a major reduction in the amount of task-specific data required for accurate predictions. This approach will typically perform less accurately than a fine-tuned model.
**One-shot**: This case is the same as the few-shot approach except only one example is provided.
**Zero-shot**: In this case, no examples are provided to the model and only the task request is provided.
#### Models
The service provides users access to several different models. Each model provides a different capability and price point.
GPT-4 models are the latest available models. These models are currently in preview. For access, existing Azure OpenAI Service customers can apply by filling out this [form](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xURjE4QlhVUERGQ1NXOTlNT0w1NldTWjJCMSQlQCN0PWcu).
The GPT-3 base models are known as Davinci, Curie, Babbage, and Ada in decreasing order of capability and increasing order of speed.
The Codex series of models is a descendant of GPT-3 and has been trained on both natural language and code to power natural language to code use cases. Learn more about each model on our [models concept page](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models).
The following table describes model families currently available in Azure OpenAI Service. Not all models are available in all regions currently. Please refer to the capability table at the bottom for a full breakdown.
| Model family | Description |
|---|---|
|[GPT-4](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models#gpt-4-models)|A set of models that improve on GPT-3.5 and can understand as well as generate natural language and code. These models are currently in preview.|
|[GPT-3](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models#gpt-3-models)|A series of models that can understand and generate natural language. This includes the new [ChatGPT model (preview)](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/chatgpt?pivots=programming-language-chat-completions).|
|[Codex](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models#codex-models)|A series of models that can understand and generate code, including translating natural language to code.|
|[Embeddings](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models#embeddings-models)|A set of models that can understand and use embeddings. An embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms. The embedding is an information dense representation of the semantic meaning of a piece of text. Currently, we offer three families of Embeddings models for different functionalities: similarity, text search, and code search.|
To learn more visit [Azure OpenAI Service models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models).
#### Responsible AI with the Azure OpenAI Service
At Microsoft, we're committed to the advancement of AI driven by principles that put people first. Generative models such as the ones available in Azure OpenAI Service have significant potential benefits, but without careful design and thoughtful mitigations, such models have the potential to generate incorrect or even harmful content. Microsoft has made significant investments to help guard against abuse and unintended harm, which includes requiring applicants to show well-defined use cases, incorporating Microsoft’s [principles for responsible AI use](https://www.microsoft.com/ai/responsible-ai?activetab=pivot1:primaryr6), building content filters to support customers, and providing responsible AI implementation guidance to onboarded customers.
More details on the RAI guidelines for the Azure OpenAI Service can be found [here](https://learn.microsoft.com/en-us/legal/cognitive-services/openai/transparency-note?context=/azure/cognitive-services/openai/context/context).
## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos is subject to those third parties' policies.
| |
142777
|
jsonpointer==2.3
jsonschema==4.17.3
openai==0.26.5
| |
142832
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Embedding texts that are longer than the model's maximum context length\n",
"OpenAI's embedding models cannot embed text that exceeds a maximum length. The maximum length varies by model, and is measured by _tokens_, not string length. If you are unfamiliar with tokenization, check out [How to count tokens with tiktoken](How_to_count_tokens_with_tiktoken.ipynb).\n",
"\n",
"This notebook shows how to handle texts that are longer than a model's maximum context length. We'll demonstrate using embeddings from `text-embedding-ada-002`, but the same ideas can be applied to other models and tasks. To learn more about embeddings, check out the [OpenAI Embeddings Guide](https://beta.openai.com/docs/guides/embeddings)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Installation\n",
"Install the Azure Open AI SDK using the below command."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [
{
"data": {
"text/html": [
"<div><div></div><div></div><div><strong>Installed Packages</strong><ul><li><span>Azure.AI.OpenAI, 1.0.0-beta.14</span></li></ul></div></div>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"#r \"nuget: Azure.AI.OpenAI, 1.0.0-beta.14\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"#r \"nuget:Microsoft.DotNet.Interactive.AIUtilities, 1.0.0-beta.24129.1\""
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"using Microsoft.DotNet.Interactive;\n",
"using Microsoft.DotNet.Interactive.AIUtilities;"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run this cell, it will prompt you for the apiKey, endPoint, and embedding deployment"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"var azureOpenAIKey = await Kernel.GetPasswordAsync(\"Provide your OPEN_AI_KEY\");\n",
"\n",
"// Your endpoint should look like the following https://YOUR_OPEN_AI_RESOURCE_NAME.openai.azure.com/\n",
"var azureOpenAIEndpoint = await Kernel.GetInputAsync(\"Provide the OPEN_AI_ENDPOINT\");\n",
"\n",
"// Enter the deployment name you chose when you deployed the model.\n",
"var deployment = await Kernel.GetInputAsync(\"Provide embedding deployment name\");"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Import namesapaces and create an instance of `OpenAiClient` using the `azureOpenAIEndpoint` and the `azureOpenAIKey`"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"using Azure;\n",
"using Azure.AI.OpenAI;\n",
"using System.Collections.Generic;"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"OpenAIClient client = new (new Uri(azureOpenAIEndpoint), new AzureKeyCredential(azureOpenAIKey.GetClearTextPassword()));"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"var longText = string.Join(\" \", Enumerable.Repeat(\"AGI\", 5000));"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run the following cell\n",
"\n",
"It will display and error like:\n",
"```\n",
"Azure.RequestFailedException: This model's maximum context length is 8191 tokens, however you requested 10000 tokens (10000 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.\n",
"\n",
"Status: 400 (model_error)\n",
"\n",
"Content:\n",
"\n",
"{\n",
"\n",
" \"error\": {\n",
"\n",
" \"message\": \"This model's maximum context length is 8191 tokens, however you requested 10000 tokens (10000 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.\",\n",
"\n",
" \"type\": \"invalid_request_error\",\n",
"\n",
" \"param\": null,\n",
"\n",
" \"code\": null\n",
"\n",
" }\n",
"\n",
"}\n",
"```\n",
"\n",
"This shows that we have crossed the limit of `8191` tokens."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [
{
"ename": "Error",
| |
142833
|
"evalue": "Azure.RequestFailedException: This model's maximum context length is 8191 tokens, however you requested 10000 tokens (10000 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.\r\nStatus: 400 (model_error)\r\n\r\nContent:\r\n{\n \"error\": {\n \"message\": \"This model's maximum context length is 8191 tokens, however you requested 10000 tokens (10000 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.\",\n \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\": null\n }\n}\n\r\n\r\nHeaders:\r\nAccess-Control-Allow-Origin: REDACTED\r\nX-Content-Type-Options: REDACTED\r\napim-request-id: REDACTED\r\nX-Request-ID: REDACTED\r\nms-azureml-model-error-reason: REDACTED\r\nms-azureml-model-error-statuscode: REDACTED\r\nx-ms-client-request-id: 89940e48-7900-40f3-ba70-68eef1b6a149\r\nx-ms-region: REDACTED\r\nStrict-Transport-Security: REDACTED\r\nDate: Tue, 07 Nov 2023 12:16:57 GMT\r\nContent-Length: 294\r\nContent-Type: application/json\r\n\r\n at Azure.Core.HttpPipelineExtensions.ProcessMessageAsync(HttpPipeline pipeline, HttpMessage message, RequestContext requestContext, CancellationToken cancellationToken)\r\n at Azure.AI.OpenAI.OpenAIClient.GetEmbeddingsAsync(EmbeddingsOptions embeddingsOptions, CancellationToken cancellationToken)\r\n at Submission#8.<<Initialize>>d__0.MoveNext()\r\n--- End of stack trace from previous location ---\r\n at Microsoft.CodeAnalysis.Scripting.ScriptExecutionState.RunSubmissionsAsync[TResult](ImmutableArray`1 precedingExecutors, Func`2 currentExecutor, StrongBox`1 exceptionHolderOpt, Func`2 catchExceptionOpt, CancellationToken cancellationToken)",
"output_type": "error",
"traceback": [
"Azure.RequestFailedException: This model's maximum context length is 8191 tokens, however you requested 10000 tokens (10000 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.\r\n",
"Status: 400 (model_error)\r\n",
"\r\n",
"Content:\r\n",
"{\n",
" \"error\": {\n",
" \"message\": \"This model's maximum context length is 8191 tokens, however you requested 10000 tokens (10000 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.\",\n",
" \"type\": \"invalid_request_error\",\n",
" \"param\": null,\n",
" \"code\": null\n",
" }\n",
"}\n",
"\r\n",
"\r\n",
"Headers:\r\n",
"Access-Control-Allow-Origin: REDACTED\r\n",
"X-Content-Type-Options: REDACTED\r\n",
"apim-request-id: REDACTED\r\n",
"X-Request-ID: REDACTED\r\n",
"ms-azureml-model-error-reason: REDACTED\r\n",
"ms-azureml-model-error-statuscode: REDACTED\r\n",
"x-ms-client-request-id: 89940e48-7900-40f3-ba70-68eef1b6a149\r\n",
"x-ms-region: REDACTED\r\n",
"Strict-Transport-Security: REDACTED\r\n",
"Date: Tue, 07 Nov 2023 12:16:57 GMT\r\n",
"Content-Length: 294\r\n",
"Content-Type: application/json\r\n",
"\r\n",
" at Azure.Core.HttpPipelineExtensions.ProcessMessageAsync(HttpPipeline pipeline, HttpMessage message, RequestContext requestContext, CancellationToken cancellationToken)\r\n",
" at Azure.AI.OpenAI.OpenAIClient.GetEmbeddingsAsync(EmbeddingsOptions embeddingsOptions, CancellationToken cancellationToken)\r\n",
" at Submission#8.<<Initialize>>d__0.MoveNext()\r\n",
"--- End of stack trace from previous location ---\r\n",
" at Microsoft.CodeAnalysis.Scripting.ScriptExecutionState.RunSubmissionsAsync[TResult](ImmutableArray`1 precedingExecutors, Func`2 currentExecutor, StrongBox`1 exceptionHolderOpt, Func`2 catchExceptionOpt, CancellationToken cancellationToken)"
]
}
],
"source": [
"var embeddingResponse = await client.GetEmbeddingsAsync(new EmbeddingsOptions(deployment, new []{ longText }));"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Clearly we want to avoid these errors, particularly when handling programmatically with a large number of embeddings. Yet, we still might be faced with texts that are longer than the maximum context length. Below we describe and provide recipes for the main approaches to handling these longer texts: (1) simply truncating the text to the maximum allowed length, and (2) chunking the text and embedding each chunk individually."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Truncating the input text\n",
"The simplest solution is to truncate the input text to the maximum allowed length. Because the context length is measured in tokens, we have to first tokenize the text before truncating it. The API accepts inputs both in the form of text or tokens, so as long as you are careful that you are using the appropriate encoding, there is no need to convert the tokens back into string form. Below is an example of such a truncation function."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [
{
"data": {
"text/html": [
"<div class=\"dni-plaintext\"><pre>19999</pre></div><style>\r\n",
".dni-code-hint {\r\n",
" font-style: italic;\r\n",
" overflow: hidden;\r\n",
" white-space: nowrap;\r\n",
"}\r\n",
".dni-treeview {\r\n",
" white-space: nowrap;\r\n",
"}\r\n",
".dni-treeview td {\r\n",
" vertical-align: top;\r\n",
" text-align: start;\r\n",
"}\r\n",
"details.dni-treeview {\r\n",
" padding-left: 1em;\r\n",
"}\r\n",
"table td {\r\n",
" text-align: start;\r\n",
"}\r\n",
"table tr { \r\n",
" vertical-align: top; \r\n",
" margin: 0em 0px;\r\n",
"}\r\n",
"table tr td pre \r\n",
"{ \r\n",
" vertical-align: top !important; \r\n",
" margin: 0em 0px !important;\r\n",
"} \r\n",
"table th {\r\n",
" text-align: start;\r\n",
"}\r\n",
"</style>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<div class=\"dni-plaintext\"><pre>16382</pre></div><style>\r\n",
".dni-code-hint {\r\n",
" font-style: italic;\r\n",
" overflow: hidden;\r\n",
" white-space: nowrap;\r\n",
"}\r\n",
".dni-treeview {\r\n",
" white-space: nowrap;\r\n",
"}\r\n",
".dni-treeview td {\r\n",
" vertical-align: top;\r\n",
" text-align: start;\r\n",
"}\r\n",
"details.dni-treeview {\r\n",
" padding-left: 1em;\r\n",
"}\r\n",
"table td {\r\n",
" text-align: start;\r\n",
"}\r\n",
"table tr { \r\n",
" vertical-align: top; \r\n",
" margin: 0em 0px;\r\n",
"}\r\n",
"table tr td pre \r\n",
"{ \r\n",
" vertical-align: top !important; \r\n",
" margin: 0em 0px !important;\r\n",
"} \r\n",
"table th {\r\n",
" text-align: start;\r\n",
"}\r\n",
| |
142834
|
"</style>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"var tokenizer = await Tokenizer.CreateAsync(TokenizerModel.ada2);\n",
"var truncated = tokenizer.TruncateByTokenCount(longText, 8191);\n",
"longText.Length.Display();\n",
"truncated.Length.Display();"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Chunking the input text\n",
"Though truncation works, discarding potentially relevant text is a clear drawback. Another approach is to divide the input text into chunks and then embed each chunk individually. Then, we can either use the chunk embeddings separately, or combine them in some way, such as averaging (weighted by the size of each chunk).\n",
"\n",
"Now we define a function that encodes a string into tokens and then breaks it up into chunks.\n",
"\n",
"Finally, we can write a function that safely handles embedding requests, even when the input text is longer than the maximum context length, by chunking the input tokens and embedding each chunk individually. The `average` flag can be set to `True` to return the weighted average of the chunk embeddings, or `False` to simply return the unmodified list of chunk embeddings."
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [
{
"data": {
"text/html": [
"<div class=\"dni-plaintext\"><pre>5</pre></div><style>\r\n",
".dni-code-hint {\r\n",
" font-style: italic;\r\n",
" overflow: hidden;\r\n",
" white-space: nowrap;\r\n",
"}\r\n",
".dni-treeview {\r\n",
" white-space: nowrap;\r\n",
"}\r\n",
".dni-treeview td {\r\n",
" vertical-align: top;\r\n",
" text-align: start;\r\n",
"}\r\n",
"details.dni-treeview {\r\n",
" padding-left: 1em;\r\n",
"}\r\n",
"table td {\r\n",
" text-align: start;\r\n",
"}\r\n",
"table tr { \r\n",
" vertical-align: top; \r\n",
" margin: 0em 0px;\r\n",
"}\r\n",
"table tr td pre \r\n",
"{ \r\n",
" vertical-align: top !important; \r\n",
" margin: 0em 0px !important;\r\n",
"} \r\n",
"table th {\r\n",
" text-align: start;\r\n",
"}\r\n",
"</style>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"var textChunks = tokenizer.ChunkByTokenCount( longText, 2000, true).ToList();\n",
"textChunks.Count.Display();"
]
},
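{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sketch (our own combination logic, not part of the SDK): embed each chunk individually, then combine the vectors with an average weighted by chunk length (token counts would be an even finer weight)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [],
"source": [
"// Sketch: embed each chunk, then average the vectors weighted by chunk length.\n",
"var chunkVectors = new List<(float[] Vector, int Weight)>();\n",
"foreach (var chunk in textChunks)\n",
"{\n",
"    var res = await client.GetEmbeddingsAsync(new EmbeddingsOptions(deployment, new[] { chunk }));\n",
"    chunkVectors.Add((res.Value.Data[0].Embedding.ToArray(), chunk.Length));\n",
"}\n",
"var dimensions = chunkVectors[0].Vector.Length;\n",
"var average = new float[dimensions];\n",
"foreach (var (vector, weight) in chunkVectors)\n",
"    for (var i = 0; i < dimensions; i++)\n",
"        average[i] += vector[i] * weight;\n",
"var totalWeight = chunkVectors.Sum(c => c.Weight);\n",
"for (var i = 0; i < dimensions; i++)\n",
"    average[i] /= totalWeight;\n",
"average.Length.Display();"
]
},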
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"dotnet_interactive": {
"language": "csharp"
},
"polyglot_notebook": {
"kernelName": "csharp"
},
"vscode": {
"languageId": "polyglot-notebook"
}
},
"outputs": [
{
"data": {
"text/html": [
| |
142894
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure chat completion models with your own data (preview)\n",
"This example shows how to use Azure OpenAI service models with your own data. The feature is currently in preview. \n",
"\n",
"Azure OpenAI on your data enables you to run supported chat models such as GPT-3.5-Turbo and GPT-4 on your data without needing to train or fine-tune models. Running models on your data enables you to chat on top of, and analyze your data with greater accuracy and speed. One of the key benefits of Azure OpenAI on your data is its ability to tailor the content of conversational AI. Because the model has access to, and can reference specific sources to support its responses, answers are not only based on its pretrained knowledge but also on the latest information available in the designated data source. This grounding data also helps the model avoid generating responses based on outdated or incorrect information.\n",
"\n",
"Azure OpenAI on your own data with Azure Cognitive Search provides a customizable, pre-built solution for knowledge retrieval, from which a conversational AI application can be built. To see alternative methods for knowledge retrieval and semantic search, check out the cookbook examples for [vector databases](https://github.com/openai/openai-cookbook/tree/main/examples/vector_databases)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## How it works\n",
"\n",
"[Azure OpenAI on your own data](https://learn.microsoft.com/azure/ai-services/openai/concepts/use-your-data) connects the model with your data, giving it the ability to retrieve and utilize data in a way that enhances the model's output. Together with Azure Cognitive Search, data is retrieved from designated data sources based on the user input and provided conversation history. The data is then augmented and resubmitted as a prompt to the model, giving the model contextual information it can use to generate a response.\n",
"\n",
"See the [Data, privacy, and security for Azure OpenAI Service](https://learn.microsoft.com/legal/cognitive-services/openai/data-privacy?context=%2Fazure%2Fai-services%2Fopenai%2Fcontext%2Fcontext) for more information."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prerequisites\n",
"To get started, we'll cover a few prequisites. \n",
"\n",
"To properly access the Azure OpenAI Service, we need to create the proper resources at the [Azure Portal](https://portal.azure.com) (you can check a detailed guide on how to do this in the [Microsoft Docs](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal))\n",
"\n",
"To use your own data with Azure OpenAI models, you will need:\n",
"\n",
"1. Azure OpenAI access and a resource with a chat model deployed (for example, GPT-3 or GPT-4)\n",
"2. Azure Cognitive Search resource\n",
"3. Azure Blob Storage resource\n",
"4. Your documents to be used as data (See [data source options](https://learn.microsoft.com/azure/ai-services/openai/concepts/use-your-data#data-source-options))\n",
"\n",
"\n",
"For a full walk-through on how to upload your documents to blob storage and create an index using the Azure AI Studio, see this [Quickstart](https://learn.microsoft.com/azure/ai-services/openai/use-your-data-quickstart?pivots=programming-language-studio&tabs=command-line)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"First, we install the necessary dependencies."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"! pip install \"openai>=0.27.6\"\n",
"! pip install python-dotenv"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this example, we'll use `dotenv` to load our environment variables. To connect with Azure OpenAI and the Search index, the following variables should be added to a `.env` file in `KEY=VALUE` format:\n",
"\n",
"* `OPENAI_API_BASE` - the Azure OpenAI endpoint. This can be found under \"Keys and Endpoints\" for your Azure OpenAI resource in the Azure Portal.\n",
"* `OPENAI_API_KEY` - the Azure OpenAI API key. This can be found under \"Keys and Endpoints\" for your Azure OpenAI resource in the Azure Portal. Omit if using Azure Active Directory authentication (see below `Authentication using Microsoft Active Directory`)\n",
"* `SEARCH_ENDPOINT` - the Cognitive Search endpoint. This URL be found on the \"Overview\" of your Search resource on the Azure Portal.\n",
"* `SEARCH_KEY` - the Cognitive Search API key. Found under \"Keys\" for your Search resource in the Azure Portal.\n",
"* `SEARCH_INDEX_NAME` - the name of the index you created with your own data."
]
},
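{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a `.env` file might look like the following (all values are placeholders):\n",
"\n",
"```\n",
"OPENAI_API_BASE=https://my-resource.openai.azure.com\n",
"OPENAI_API_KEY=<your Azure OpenAI key>\n",
"SEARCH_ENDPOINT=https://my-search.search.windows.net\n",
"SEARCH_KEY=<your Cognitive Search key>\n",
"SEARCH_INDEX_NAME=my-index\n",
"```"
]
},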
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import openai\n",
"import dotenv\n",
"\n",
"dotenv.load_dotenv()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"openai.api_base = os.environ[\"OPENAI_API_BASE\"]\n",
"\n",
"# Azure OpenAI on your own data is only supported by the 2023-08-01-preview API version\n",
"openai.api_version = \"2023-08-01-preview\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Authentication\n",
"\n",
"The Azure OpenAI service supports multiple authentication mechanisms that include API keys and Azure credentials."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"use_azure_active_directory = False # Set this flag to True if you are using Azure Active Directory"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"#### Authentication using API key\n",
"\n",
"To set up the OpenAI SDK to use an *Azure API Key*, we need to set up the `api_type` to `azure` and set `api_key` to a key associated with your endpoint (you can find this key in *\"Keys and Endpoints\"* under *\"Resource Management\"* in the [Azure Portal](https://portal.azure.com))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"if not use_azure_active_directory:\n",
" openai.api_type = 'azure'\n",
" openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Authentication using Microsoft Active Directory\n",
"Let's now see how we can get a key via Microsoft Active Directory Authentication. See the [documentation](https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity) for more information on how to set this up."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"! pip install azure-identity"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from azure.identity import DefaultAzureCredential\n",
"\n",
"if use_azure_active_directory:\n",
" default_credential = DefaultAzureCredential()\n",
" token = default_credential.get_token(\"https://cognitiveservices.azure.com/.default\")\n",
"\n",
" openai.api_type = \"azure_ad\"\n",
" openai.api_key = token.token"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A token is valid for a period of time, after which it will expire. To ensure a valid token is sent with every request, you can refresh an expiring token by hooking into requests.auth:"
]
},
{
"cell_type": "code",
"execution_count": 8,
| |
142904
|
{
"cells": [
{
"cell_type": "markdown",
"id": "278e7451",
"metadata": {},
"source": [
"<h1 align =\"center\"> Python SDK Sample</h1>\n",
"<hr>\n",
"\n",
"# Chat Completions\n",
"\n",
"Chat models take a series of messages as input, and return a model-generated message as output.\n",
"The main input is the messages parameter. Messages must be an array of message objects, where each object has a role (either \"system\", \"user\", or \"assistant\") and content (the content of the message). "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fb97123e",
"metadata": {},
"outputs": [],
"source": [
"# if needed, install and/or upgrade to the latest version of the OpenAI Python library\n",
"%pip install --upgrade openai"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ccbb9a99",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# import os module & the OpenAI Python library for calling the OpenAI API\n",
"import os\n",
"from openai import AzureOpenAI\n",
"import dotenv\n",
"dotenv.load_dotenv()\n"
]
},
{
"cell_type": "markdown",
"id": "6d33f92a",
"metadata": {},
"source": [
"### Setup Parameters"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1d67d3b6",
"metadata": {},
"outputs": [],
"source": [
"# Setting up the deployment name\n",
"deployment_name = os.environ['COMPLETIONS_MODEL']\n",
"\n",
"# The API key for your Azure OpenAI resource.\n",
"api_key = os.environ[\"AZURE_OPENAI_API_KEY\"]\n",
"\n",
"# The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n",
"azure_endpoint = os.environ['AZURE_OPENAI_ENDPOINT']\n",
"\n",
"# Currently Chat Completion API have the following versions available: 2023-03-15-preview\n",
"api_version = os.environ['OPENAI_API_VERSION']\n",
"\n",
"client = AzureOpenAI(\n",
" api_key=api_key, \n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "150a3db0",
"metadata": {},
"outputs": [],
"source": [
"# A sample API call for chat completions looks as follows:\n",
"# Messages must be an array of message objects, where each object has a role (either \"system\", \"user\", or \"assistant\") and content (the content of the message).\n",
"# For more info: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#chat-completions\n",
"\n",
"try:\n",
" response = client.chat.completions.create(\n",
" model=deployment_name,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
" {\"role\": \"user\", \"content\": \"Who won the world series in 2020?\"}\n",
" ]\n",
" )\n",
"\n",
" # print the response\n",
" print(response.choices[0].message.content)\n",
"\n",
"except openai.AuthenticationError as e:\n",
" # Handle Authentication error here, e.g. invalid API key\n",
" print(f\"OpenAI API returned an Authentication Error: {e}\")\n",
"\n",
"except openai.APIConnectionError as e:\n",
" # Handle connection error here\n",
" print(f\"Failed to connect to OpenAI API: {e}\")\n",
"\n",
"except openai.BadRequestError as e:\n",
" # Handle connection error here\n",
" print(f\"Invalid Request Error: {e}\")\n",
"\n",
"except openai.RateLimitError as e:\n",
" # Handle rate limit error\n",
" print(f\"OpenAI API request exceeded rate limit: {e}\")\n",
"\n",
"except openai.InternalServerError as e:\n",
" # Handle Service Unavailable error\n",
" print(f\"Service Unavailable: {e}\")\n",
"\n",
"except openai.APITimeoutError as e:\n",
" # Handle request timeout\n",
" print(f\"Request timed out: {e}\")\n",
" \n",
"except openai.APIError as e:\n",
" # Handle API error here, e.g. retry or log\n",
" print(f\"OpenAI API returned an API Error: {e}\")\n",
"\n",
"except:\n",
" # Handles all other exceptions\n",
" print(\"An exception has occured.\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| |
142967
|
# Introduction
This repository shows how to set up and use the Azure OpenAI API with LangChain.
## Installation
Install all Python modules and packages listed in the requirements.txt file using the command below.
```python
pip install -r requirements.txt
```
### Microsoft Azure Endpoints
In order to use the OpenAI library or REST API with Microsoft Azure endpoints, you need to set the DEPLOYMENT_NAME, OPENAI_API_BASE and OPENAI_API_VERSION values in the _config.json_ file.
```js
{
"DEPLOYMENT_NAME":"<Model Deployment Name>",
"OPENAI_API_BASE":"https://<Your Azure Resource Name>.openai.azure.com",
"OPENAI_API_VERSION":"<OpenAI API Version>"
}
```
### For getting started:
- Add "OPENAI_API_KEY" as variable name and \<Your API Key Value\> as variable value in the environment variables.
<br>
One can get the OPENAI_API_KEY value from the Azure Portal. Go to https://portal.azure.com, find your resource and then under "Resource Management" -> "Keys and Endpoints" look for one of the "Keys" values.
<br>
WINDOWS Users:
setx OPENAI_API_KEY "REPLACE_WITH_YOUR_KEY_VALUE_HERE"
MACOS/LINUX Users:
export OPENAI_API_KEY="REPLACE_WITH_YOUR_KEY_VALUE_HERE"
- To find your "DEPLOYMENT_NAME" go to the deployments page of the Azure AI Studio. Create a deployment if one does not already exist.
One can start with using your model name as "gpt-35-turbo-0613" or "gpt-4."
- To find your "OPENAI_API_BASE" go to https://portal.azure.com, find your resource and then under "Resource Management" -> "Keys and Endpoints" look for the "Endpoint" value.
- Check out versions [here](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference).
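To double-check that the configuration is readable, one can run a quick sanity check like the sketch below (adjust the path if your _config.json_ lives elsewhere):
```python
import json
import os

# Load the Azure OpenAI settings from config.json
with open("config.json") as config_file:
    config = json.load(config_file)

print(config["DEPLOYMENT_NAME"], config["OPENAI_API_BASE"], config["OPENAI_API_VERSION"])
# The API key comes from the environment, not config.json
print("OPENAI_API_KEY set:", os.getenv("OPENAI_API_KEY") is not None)
```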
## Requirements
Python 3.8+ <br>
Jupyter Notebook 6.5.2
<br>
## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos is subject to those third parties' policies.
| |
142969
|
"set_key(\".env\", \"OPENAI_API_VERSION\", \"Your api version here\")\n",
"set_key(\".env\", \"COMPLETIONS_MODEL\", \"Your model here\")"
]
},
{
"cell_type": "markdown",
"id": "4e595a7e-9b33-472d-96ff-e3229d723642",
"metadata": {},
"source": [
"#### Get all required Environment Variables"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "fb74be9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"load_dotenv()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "b456baeb-fe51-45c8-940e-7f6ec52da0df",
"metadata": {},
"outputs": [],
"source": [
"# Setting up the deployment name\n",
"DEPLOYMENT_NAME = os.environ[\"COMPLETIONS_MODEL\"]\n",
"\n",
"# The API key for your Azure OpenAI resource.\n",
"API_KEY = os.environ[\"AZURE_OPENAI_API_KEY\"]\n",
"\n",
"# The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n",
"ENDPOINT = os.environ[\"AZURE_OPENAI_ENDPOINT\"]\n",
"\n",
"# The API version required\n",
"VERSION = os.environ[\"OPENAI_API_VERSION\"]"
]
},
{
"cell_type": "markdown",
"id": "a43b7040-d6ab-4df8-8432-907eb1ca4099",
"metadata": {},
"source": [
"## Creating an AzureChatOpenAI Model\n",
"\n",
"More information on LangChain's AzureChatOpenAI support can be found in [**the integration documentation**](https://python.langchain.com/v0.2/docs/integrations/chat/azure_chat_openai/)."
]
},
{
"cell_type": "markdown",
"id": "172e4ff3-b3fd-4f10-bd2b-18ac1e259973",
"metadata": {},
"source": [
"- Environment variable values can be passed as parameters.\n",
"- Alternatively, if not passed in, the constructor will search for environment variables with corresponding names."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "74baa57a-6b35-403c-9902-2f5a9c7e2bd4",
"metadata": {},
"outputs": [],
"source": [
"model = AzureChatOpenAI(\n",
" openai_api_version=VERSION,\n",
" azure_deployment=DEPLOYMENT_NAME,\n",
" azure_endpoint=ENDPOINT,\n",
" temperature=0.5,\n",
" max_tokens=200,\n",
" timeout=60,\n",
" max_retries=10,\n",
" # model=\"gpt-35-turbo\",\n",
" # model_version=\"0125\",\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "23b80bbe-442c-4374-a885-87bd6408dc28",
"metadata": {},
"source": [
"In the above code sample, **OPENAI_API_VERSION** and **AZURE_OPENAI_ENDPOINT** are both being passed in, but **AZURE_OPENAI_API_KEY** is being retrieved within the constructor."
]
},
{
"cell_type": "markdown",
"id": "9b85766e-2581-4f24-86c1-cde10a0601c0",
"metadata": {},
"source": [
"#### Other Optional Parameters\n",
"\n",
"- `temperature` determines how creative and unpredictable, or how deterministic and predictable, the model should be in its responses. A temperature of 0 would be predictable, while anything higher would make responses more random.\n",
"\n",
"- `max_tokens` defines the maximum number of tokens (words or pieces of words) the model can generate in its response.\n",
"\n",
"- `timeout` specifies the maximum amount of time (in seconds) to wait for a response from the API before timing out. An `APITimeoutError` will be raised in the case of a timeout.\n",
"\n",
"- `max_retries` sets the number of times the API request should be retried in case of retriable failure before giving up.\n",
"\n",
"- `model` specifies the model to be used.\n",
"\n",
"- `model_version` indicates the specific version of the chosen model to use. This is useful for maintaining consistency in testing and for tracing purposes, such as tracking API calls or diagnosing issues related to specific model versions.\n",
"\n",
"- See the [**API Reference**](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html) for more details.\n",
"\n",
"- Other parameters may be available in different SDK's."
]
},
{
"cell_type": "markdown",
"id": "b9edabc3",
"metadata": {},
"source": [
"### Wait in between API calls"
]
},
{
"cell_type": "markdown",
"id": "239cdb0b",
"metadata": {},
"source": [
"The number of API requests a user can make depends on their Azure plan and account settings. If too many requests are sent in a short period, an error may occur, prompting the user to wait for **x** amount of time before sending another request.\n",
"\n",
"When creating a model, one of the key parameters is the `max_retries` setting. The underlying Python OpenAI library will automatically wait and retry the call on your behalf at least 2 times by default before raising a `RateLimitError`. This behavior can be adjusted by setting a different value for `max_retries`.\n",
"\n",
"Visit the [**quotas and limits**](https://learn.microsoft.com/azure/ai-services/openai/quotas-limits) page to view detailed information related to account limits and restrictions."
]
},
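{
"cell_type": "markdown",
"id": "7f3a9c21",
"metadata": {},
"source": [
"As a minimal sketch of a manual fallback on top of the SDK's built-in retries (assuming the `openai` package is importable for its `RateLimitError` type, and that `messages` is a list of chat messages), you could wrap `model.invoke` in your own wait-and-retry loop:\n",
"\n",
"```python\n",
"import time\n",
"\n",
"from openai import RateLimitError\n",
"\n",
"response = None\n",
"for attempt in range(3):\n",
"    try:\n",
"        response = model.invoke(messages)\n",
"        break\n",
"    except RateLimitError:\n",
"        # Exponential backoff before trying again.\n",
"        time.sleep(2 ** attempt)\n",
"```"
]
},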
{
"cell_type": "markdown",
"id": "28a397b3-27b7-49f0-bccf-108f39196ee6",
"metadata": {},
"source": [
"## Model Usage"
]
},
{
"cell_type": "markdown",
"id": "1a152eab-52e5-48f3-bc8f-0b410d65c7d0",
"metadata": {},
"source": [
"#### Using Messages from the `langchain_core.messages` Library\n",
"\n",
"The `langchain_core.messages` library allows the user to define messages for the model and assign roles to each message."
]
},
{
"cell_type": "markdown",
"id": "259a462a-4a83-4f17-a337-be04d67d3020",
"metadata": {},
"source": [
"- LangChain-compatible chat models take a list of `messages` as `input` and return the AI message as `output`.\n",
"\n",
"- All messages have `role` and `content` properties. In the sample below, the roles are set by using the `SystemMessage` and `HumanMessage` classes. [**We'll cover more on this later**](#assigning-roles-using-langchain-messages) .\n",
"\n",
"- Additional provider-specific information can be incorporated using the `additional_kwargs` parameter. This could include provider-specific metadata or custom settings and flags."
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "57c6f2ae-b6d0-46e3-831a-ad5f77daf0bd",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" SystemMessage(content=\"Translate the following from German into English\"),\n",
" HumanMessage(\n",
" content=\"Sie haben gerade Ihr erstes Kunstliche Itelligenz Model erstellt!\"\n",
" ),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 23,
| |
142970
|
"id": "b6a40126-bede-46dc-8922-8721ac2f9c22",
"metadata": {},
"outputs": [],
"source": [
"response = model.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "876d40ef-a75f-4d62-bc0a-43de6b55ce30",
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"'You have just created your first artificial intelligence model!'"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response.content"
]
},
{
"cell_type": "markdown",
"id": "a0c37fe3-ed5d-4633-8e97-79cdba0e0365",
"metadata": {},
"source": [
"### Prompting\n",
"\n",
"- Prompts are the inputs to language models, refined from raw user inputs to be ready for processing by the models.\n",
"\n",
"- [**Prompting**](https://www.datacamp.com/tutorial/prompt-engineering-with-langchain) involves crafting text inputs that clearly communicate with the models, outlining the specific task we want it to accomplish. This can include:\n",
" - Selecting the appropriate wording and setting a particular tone or style.\n",
" - Providing necessary context.\n",
" - Assigning a role, such as asking it to respond as if it were a native speaker of a certain language."
]
},
{
"cell_type": "markdown",
"id": "2ee1ff1f-dbfc-445f-807e-4682db59460f",
"metadata": {},
"source": [
"#### Prompt Templates\n",
"\n",
"- LangChain allows developers to design parameterized [**Prompt Templates**](https://python.langchain.com/v0.2/docs/concepts/#prompt-templates) that are reusable and easily transferable between different models for integration.\n",
"\n",
"- It takes user input and inserts said input into the prompt to feed into the language models.\n",
"\n",
"#### `PromptTemplate`\n",
"`PromptTemplate` is used to create an instance of [**Prompt**](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.prompt.PromptTemplate.html#prompttemplate), and this is `invoked` by sending it to a model, which produces a `PromptValue`.\n",
"\n",
"The example code uses `.from_template`, which handles a single string template with placeholders for dynamic inputs."
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "04a11bb8-e4d9-4687-ab89-210aa1b7b51b",
"metadata": {},
"outputs": [],
"source": [
"prompt_template = PromptTemplate.from_template(\n",
" \"What vegetable crops can I grow in {month} in {city}, New Zealand?\"\n",
")\n",
"\n",
"prompt_value = prompt_template.format(month=\"December\", city=\"Rotorua\")\n",
"\n",
"\n",
"# print(prompt_template) # <- uncomment to see\n",
"# print(prompt_value) # <- uncomment to see"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "f92db0c1-3741-42be-9d1e-edbe5010b61e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"In Rotorua, New Zealand, December falls in the Southern Hemisphere's summer, which is a great time for growing a variety of vegetables. Here are some vegetable crops you can plant in December:\\n\\n1. **Tomatoes**: Ideal for summer planting, they thrive in the warm weather.\\n2. **Capsicums (Bell Peppers)**: These also enjoy the summer heat.\\n3. **Zucchini**: Fast-growing and productive during warm months.\\n4. **Cucumbers**: Perfect for summer salads and pickling.\\n5. **Beans**: Both bush and pole beans grow well in the warm season.\\n6. **Sweet Corn**: Requires warm temperatures and plenty of sunlight.\\n7. **Pumpkins**: Plant now for a harvest in autumn.\\n8. **Eggplants**: Another heat-loving crop.\\n9. **Lettuce**: Opt for heat-tolerant varieties to avoid bolting.\\n10. **Radishes**: Fast-growing and can\""
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response = model.invoke(prompt_value)\n",
"response.content"
]
},
{
"cell_type": "markdown",
"id": "1ca904f4-faf1-4b95-bf77-7c7a7fa64515",
"metadata": {},
"source": [
"#### `ChatPromptTemplate`\n",
"\n",
"This is optimized for a conversation-like format. The prompt is a list of chat messages. Each chat message is associated with `role` and `content`. In the example code, `.from_messages` is used to include multiple messages.\n",
"\n",
"Here, we will hardcode roles in the chat prompt, as opposed to using the pre-built roles `SystemMessage` or `HumanMessage` like earlier."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "46e869bf-370d-4742-837f-c7d5eb76a891",
"metadata": {},
"outputs": [],
"source": [
"chat_template = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"\"\"\n",
" You're a travel agent helping customers plan their trips.\n",
" Offer recommendations on natural features to visit, local cuisine, and activities based on the country the customer is asking about.\n",
" \"\"\",\n",
" ),\n",
" (\"ai\", \"Hi there, What can I help you with today?\"),\n",
" (\n",
" \"human\",\n",
" \"Hi I'm {name}, I'm planning a trip to {country}. Any recommendations\",\n",
" ),\n",
" ]\n",
")\n",
"\n",
"prompt_value = chat_template.format_messages(name=\"Lucy\", country=\"New Zealand\")\n",
"\n",
"# print(chat_template) # <- uncomment to see\n",
"# print(prompt_value) # <- uncomment to see"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "c87adc53-1248-441d-aa46-44f410c24796",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Hi Lucy! New Zealand is a fantastic choice with its stunning landscapes, rich culture, and exciting activities. Here are some recommendations to make your trip memorable:\\n\\n### Natural Features\\n1. **Fiordland National Park**: Home to the famous Milford Sound and Doubtful Sound, this area offers breathtaking fjords, waterfalls, and rainforests.\\n2. **Tongariro National Park**: Known for its dramatic volcanic landscape, you can hike the Tongariro Alpine Crossing, one of the best one-day hikes in the world.\\n3. **Rotorua**: Famous for its geothermal activity, you can see geysers, hot springs, and mud pools. Don't miss the Wai-O-Tapu Thermal Wonderland.\\n4. **Aoraki/Mount Cook**: The highest mountain in New Zealand, offering stunning views, glaciers, and excellent hiking trails.\\n5. **Bay of Islands**: A beautiful coastal area with over 140 subtropical islands, perfect for sailing, fishing,\""
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response = model.invoke(prompt_value)\n",
"response.content"
]
},
{
"cell_type": "markdown",
"id": "d83fef4b",
"metadata": {},
"source": [
"#### Assigning Roles Using LangChain Messages"
]
},
{
"cell_type": "markdown",
| |
142971
|
"id": "cb6794d6-ba1f-4367-bae6-af0136212713",
"metadata": {},
"source": [
"Compared to hardcoding the roles like above, LangChain Messages allow for more flexibility and better management, especially with complex conversations involving multiple roles. It also simplifies the visualization of the conversation flow.\n",
"\n",
"It is therefore recommended to use LangChain messages where possible.\n",
"\n",
"**Basic Message Types**\n",
"\n",
"| | |\n",
"|-------------|-----------------------------------------------------------------|\n",
"| `SystemMessage` | Set how the AI should behave (appropriate wording, tone, style, etc.) |\n",
"| `HumanMessage` | Message sent from the user |\n",
"| `AIMessage` | Message from the AI chat model (context setting, guidance for response) |\n",
"\n",
"For more info, see [**Message Types**](https://python.langchain.com/v0.1/docs/modules/model_io/chat/message_types/) and [**API Reference**](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.messages)."
]
},
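{
"cell_type": "markdown",
"id": "e5d2b8a7",
"metadata": {},
"source": [
"As a small illustrative sketch, a conversation built directly from these message types (reusing the `model` created earlier) could look like:\n",
"\n",
"```python\n",
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
"\n",
"conversation = [\n",
"    SystemMessage(content=\"You are a concise assistant.\"),\n",
"    HumanMessage(content=\"What is the capital of New Zealand?\"),\n",
"    AIMessage(content=\"The capital of New Zealand is Wellington.\"),\n",
"    HumanMessage(content=\"Roughly how many people live there?\"),\n",
"]\n",
"\n",
"response = model.invoke(conversation)\n",
"```"
]
},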
{
"cell_type": "markdown",
"id": "95152680-ebc2-4106-99bf-679547d0d1fe",
"metadata": {},
"source": [
"#### `base message` and `MessagePromptTemplate`\n",
"We can also pass a `base message` or `MessagePromptTemplate` instead of tuples."
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "b956d584-881a-4e9b-ae81-565ffdd7bf4a",
"metadata": {},
"outputs": [],
"source": [
"chat_template = ChatPromptTemplate.from_messages(\n",
" [\n",
" SystemMessage(\n",
" content=(\n",
" \"You are a translator. You are to translate the text into English.\"\n",
" )\n",
" ),\n",
" HumanMessagePromptTemplate.from_template(\"{text}\"),\n",
" ]\n",
")\n",
"\n",
"prompt_value = chat_template.format_messages(text=\"ゆずは日本で人気の果物です\")\n",
"\n",
"# print(chat_template) # <- uncomment to see\n",
"# print(prompt_value) # <- uncomment to see"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "648579a0-ca55-4885-bf70-47c0e08e066d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Yuzu is a popular fruit in Japan.'"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response = model.invoke(prompt_value)\n",
"response.content"
]
},
{
"cell_type": "markdown",
"id": "33f05d01-151b-4cda-a386-567a5523bb03",
"metadata": {},
"source": [
"#### `MessagePlaceHolder`\n",
"This is used to select which messages to include when formatting."
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "0f7c266b-4438-4912-9b21-b0611960f296",
"metadata": {},
"outputs": [],
"source": [
"# SYSTEM ROLE Prompt\n",
"system_template = SystemMessagePromptTemplate.from_template(\"\"\"\n",
" You are a precise assistant who knows the schedule of the team.\n",
" Schedule details are as follows: {schedule}.\n",
" Only provide information to the team members.\n",
" Strictly only provide information specific to what is asked, Do not give extra information.\n",
" \"\"\")\n",
"# HUMAN ROLE Prompt\n",
"human_template = HumanMessagePromptTemplate.from_template(\"My name is {user_name}.\")\n",
"# AI ROLE Prompt\n",
"ai_template = AIMessagePromptTemplate.from_template(\n",
" \"Hello {user_name}, how can I help you today?\"\n",
")\n",
"\n",
"chat_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" # this has essentially created a 'conversation history'\n",
" system_template,\n",
" human_template,\n",
" ai_template,\n",
" MessagesPlaceholder(variable_name=\"conversation\"),\n",
" ]\n",
")\n",
"\n",
"# print(chat_prompt) # <- uncomment to see the chat prompt"
]
},
{
"cell_type": "markdown",
"id": "6ab8c223-97d6-4a5c-83a5-24eeba7c47f3",
"metadata": {},
"source": [
"We can then input more prompts, which will take the `MessagePlaceholders`' place and create lines of sentences or a conversation."
]
},
{
"cell_type": "code",
"execution_count": 52,
"id": "cea2fce5-2692-4df1-bac1-80bb9472d7f0",
"metadata": {},
"outputs": [],
"source": [
"schedule = \"\"\"\n",
" Team Members: Alice, Bob, Carol, David, Emily\n",
" Team Meeting Schedule: Every Tuesday at 11:00 AM\n",
" Topic: LangChain with Azure OpenAI Integration\n",
"\"\"\"\n",
"# these messages will take MESSAGEPLACEHOLDERS place\n",
"human_query = HumanMessage(\"When is the next team meeting and who is attending?\")\n",
"ai_message = AIMessage(\"Hold on a second, let me check the schedule for you.\")\n",
"\n",
"prompt_value = chat_prompt.format_messages(\n",
" conversation=[human_query, ai_message], user_name=\"David\", schedule=schedule\n",
")\n",
"\n",
"# print(prompt_value) # <- uncomment to see the prompt"
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "1558cfbf-5806-4a24-a0fa-8a1fb90a00d8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The next team meeting is on Tuesday at 11:00 AM. The attendees are Alice, Bob, Carol, David, and Emily.'"
]
},
"execution_count": 53,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"response = model.invoke(prompt_value)\n",
"response.content"
]
},
{
"cell_type": "markdown",
"id": "34ec2ed1-83f1-4331-97fb-a6319f53e1fd",
"metadata": {},
"source": [
"#### `FewShotPrompt`\n",
"\n",
"We can use examples (shots) to condition the model for a better response by including some example input and output in the prompt. This will inform the model about the context and how we want the output to be formatted."
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "fcaa0701-6f2c-4ffb-b011-75b6849d2ebe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Currency Unit Conversion: [Input] one dollar => [Output] $1\n",
"Currency Unit Conversion: [Input] one hundred yen => [Output] ¥100\n"
]
}
],
"source": [
"examples = [\n",
" {\"input\": \"one dollar\", \"output\": \"$1\"},\n",
" {\"input\": \"thirty five euros\", \"output\": \"€35\"},\n",
"]\n",
"\n",
"example_prompt = PromptTemplate(\n",
" input_variables=[\"input\", \"output\"],\n",
" template=\"Currency Unit Conversion: [Input] {input} => [Output] {output}\",\n",
")\n",
"\n",
"# unpack the first example dictionary and feed it to the prompt template to format\n",
| |
142978
|
{
"cells": [
{
"cell_type": "markdown",
"id": "6d251ced",
"metadata": {},
"source": [
"# How to use LangChain and Azure OpenAI with Python\n",
"\n",
"\n",
"Langchain is an open source framework for developing applications using large language models (LLM). <br>\n",
"\n",
"This guide will demonstrate how to setup and use Azure OpenAI models' API with LangChain.\n",
" "
]
},
{
"cell_type": "markdown",
"id": "9d0ee335",
"metadata": {},
"source": [
"## Set Up\n",
"The following libraries must be installed to use LangChain with Azure OpenAI.<br>"
]
},
{
"cell_type": "code",
"execution_count": 36,
"id": "35289cea",
"metadata": {},
"outputs": [],
"source": [
"#%pip install --upgrade openai\n",
"#%pip install langchain"
]
},
{
"cell_type": "markdown",
"id": "ba880453",
"metadata": {},
"source": [
"## API Configuation and Deployed Model Setup\n",
"\n",
"After installing the necessary libraies, the API must be configured. The code below shows how to configure the API directly in your Python environment. \n"
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "a9752fda",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"import openai\n",
"import json\n",
"import os\n",
"from langchain.chat_models import AzureChatOpenAI\n",
"from langchain.schema import HumanMessage\n",
"from langchain import LLMChain\n",
"\n",
"\n",
"# Load config values\n",
"with open(r'config.json') as config_file:\n",
" config_details = json.load(config_file)\n",
"\n",
"# The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n",
"openai_api_base=config_details['OPENAI_API_BASE']\n",
" \n",
"# API version e.g. \"2023-07-01-preview\"\n",
"openai_api_version=config_details['OPENAI_API_VERSION']\n",
"\n",
"# The name of your Azure OpenAI deployment chat model. e.g. \"gpt-35-turbo-0613\"\n",
"deployment_name=config_details['DEPLOYMENT_NAME']\n",
"\n",
"# The API key for your Azure OpenAI resource.\n",
"openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"# This is set to `azure`\n",
"openai_api_type=\"azure\""
]
},
{
"cell_type": "markdown",
"id": "52da840e",
"metadata": {},
"source": [
"## Deployed Model Setup"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "041f0a56",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"In the realm of dreams, where thoughts take flight,\\nA tapestry of words, I shall now write.\\nWith ink and quill, I'll weave a tale,\\nOf love and hope, where hearts prevail.\\n\\nIn meadows adorned with flowers so fair,\\nA gentle breeze whispers secrets in the air.\\nThe sun shines bright, painting the sky,\\nA canvas of colors, where dreams never die.\\n\\nBeneath a canopy of stars, we shall dance,\\nLost in a moment, in a lover's trance.\\nOur hearts entwined, beats synchronized,\\nA symphony of love, never compromised.\\n\\nThrough valleys of sorrow, we shall tread,\\nWith courage and strength, our fears we'll shed.\\nFor love, a beacon, shall guide our way,\\nThrough darkest nights, to a brighter day.\\n\\nIn the depths of silence, a whispered prayer,\\nFor peace and harmony, beyond compare.\\nMay kindness bloom, like flowers in spring,\\nAnd compassion's song, forever sing.\\n\\nOh, let this poem be a gentle reminder,\\nThat within us all, love is a powerful binder.\\nFor in these words, a message so true,\\nThat love's embrace can heal and renew.\\n\\nSo let us cherish, this gift we possess,\\nThe power of words, to heal and impress.\\nThrough poetry's grace, may hearts be moved,\\nAnd in its beauty, we shall be proved.\", additional_kwargs={}, example=False)"
]
},
"execution_count": 43,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Create an instance of chat llm\n",
"llm = AzureChatOpenAI(\n",
" openai_api_base=openai_api_base,\n",
" openai_api_version=openai_api_version,\n",
" deployment_name=deployment_name,\n",
" openai_api_key=openai_api_key,\n",
" openai_api_type=openai_api_type,\n",
")\n",
"\n",
"llm([HumanMessage(content=\"Write me a poem\")])"
]
},
{
"cell_type": "markdown",
"id": "dc7ea2d4",
"metadata": {},
"source": [
"## PromptTemplates\n",
"\n",
"Langchain provides a built in PromptsTemplate module to simplify the construction of prompts to get more specific answers."
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "927d4bac",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Example #1:\n",
"content='A highly recommended face wash for acne-prone skin is the \"Neutrogena Oil-Free Acne Wash.\" This product contains salicylic acid, which helps to treat and prevent acne by unclogging pores and reducing inflammation. It is oil-free, non-comedogenic, and gentle enough for daily use. Additionally, it effectively removes dirt, excess oil, and makeup without over-drying the skin.' additional_kwargs={} example=False\n",
"\n",
"\n",
"Example #2:\n",
"If you're looking for warm weather and beautiful beaches in December, here are a few destinations you might consider:\n",
"\n",
"1. Maldives: This tropical paradise offers pristine beaches, crystal-clear waters, and luxurious resorts. December is a great time to visit, with temperatures averaging around 28°C (82°F).\n",
"\n",
"2. Thailand: Thailand's southern islands, such as Phuket, Krabi, and Koh Samui, offer warm weather and stunning beaches in December. You can relax on the white sands, go snorkeling or diving, and explore the vibrant local culture.\n",
"\n",
"3. Bali, Indonesia: Bali is a popular destination known for its stunning beaches, lush landscapes, and vibrant culture. In December, you can enjoy warm temperatures and take part in water sports or simply unwind by the beach.\n",
"\n",
"4. Cancun, Mexico: Cancun is a favorite destination for beach lovers, with its turquoise waters and soft white sands. December is a great time to visit, with temperatures around 27°C (81°F), and you can also explore the nearby Mayan ruins.\n",
"\n",
"5. Seychelles: This archipelago in the Indian Ocean boasts some of the world's most beautiful beaches. December is an excellent time to visit, as the weather is warm and you can enjoy activities like snorkeling, diving, and island hopping.\n",
"\n",
"Remember to check travel restrictions and safety guidelines before planning your trip, as they may vary due to the ongoing COVID-19 pandemic.\n"
]
}
],
"source": [
"from langchain import PromptTemplate\n",
"\n",
"from langchain.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
")\n",
"\n",
"\n",
"# First Example\n",
"template = \"\"\"\n",
"You are a skin care consulant that recommends products based on customer\n",
"needs and preferences.\n",
"\n",
"What is a good {product_type} to help with {customer_request}?\n",
"\"\"\"\n",
| |
142979
|
"\n",
"prompt = PromptTemplate(\n",
"input_variables=[\"product_type\", \"customer_request\"],\n",
"template=template,\n",
")\n",
"\n",
"print(\"Example #1:\")\n",
"print(llm([HumanMessage(content=prompt.format(\n",
" product_type=\"face wash\",\n",
" customer_request = \"acne prone skin\"\n",
" ))]\n",
"))\n",
"print(\"\\n\")\n",
"\n",
"# Second Example\n",
"system_message = \"You are an AI assistant travel assistant that provides vacation recommendations.\"\n",
"\n",
"system_message_prompt = SystemMessagePromptTemplate.from_template(system_message)\n",
"human_template=\"{text}\"\n",
"human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n",
"chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])\n",
"chain = LLMChain(llm=llm, prompt=chat_prompt)\n",
"result = chain.run(f\"Where should I go on vaction in Decemember for warm weather and beaches?\")\n",
"print(\"Example #2:\")\n",
"print(result)"
]
},
{
"cell_type": "markdown",
"id": "7b6723eb",
"metadata": {},
"source": [
"## Chains\n",
"There are many applications of chains that allow you to combine numerous LLM calls and actions. <br>\n",
"\n",
"### Simple Sequential Chains <br>\n",
"Allow you to feed the output of one LLM Chain as input for another."
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "af7c236f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import SimpleSequentialChain"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "2a4a32f0",
"metadata": {},
"outputs": [],
"source": [
"description_template = \"\"\"Your job is to come up with a fun DIY project for the specified gender, age, and description of a kid.\n",
"% CHILD_DESCRIPTION\n",
"{child_description}\n",
"\n",
"YOUR RESPONSE:\n",
"\"\"\"\n",
"description_prompt_template = PromptTemplate(input_variables=[\"child_description\"], template=description_template)\n",
"\n",
"description_chain = LLMChain(llm=llm, prompt=description_prompt_template)"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "6eec47ff",
"metadata": {},
"outputs": [],
"source": [
"diy_description_template = \"\"\"Given a DIY project, give a short and simple recipe step-by-step guide on how to complete the project and a materials list.\n",
"% DIY_PROJECT\n",
"{diy_project}\n",
"\n",
"YOUR RESPONSE:\n",
"\"\"\"\n",
"diy_prompt_template = PromptTemplate(input_variables=[\"diy_project\"], template=diy_description_template)\n",
"\n",
"diy_chain = LLMChain(llm=llm, prompt=diy_prompt_template)"
]
},
{
"cell_type": "code",
"execution_count": 48,
"id": "84a15aea",
"metadata": {},
"outputs": [],
"source": [
"overall_chain = SimpleSequentialChain(chains=[description_chain, diy_chain], verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 49,
"id": "15928f72",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new SimpleSequentialChain chain...\u001b[0m\n",
"\u001b[36;1m\u001b[1;3mDIY Sparkling Fairy Wand:\n",
"\n",
"Materials needed:\n",
"- Wooden dowel or stick\n",
"- Glitter foam sheets in various colors\n",
"- Ribbon or tulle\n",
"- Craft glue\n",
"- Scissors\n",
"- Decorative gems or sequins\n",
"- Glitter glue (optional)\n",
"\n",
"Instructions:\n",
"1. Begin by cutting out a star shape from one of the glitter foam sheets. This will be the top of the wand.\n",
"2. Cut out a long strip of foam from another color and wrap it around the wooden dowel or stick, starting from the bottom. Secure it with craft glue.\n",
"3. Cut out smaller shapes like hearts, butterflies, or flowers from different colored glitter foam sheets.\n",
"4. Use craft glue to stick these shapes onto the wrapped foam strip, creating a beautiful pattern. Let them dry completely.\n",
"5. Once the foam shapes are secure, add some extra sparkle by applying glitter glue to the edges or adding decorative gems or sequins.\n",
"6. Finally, tie ribbons or tulle strands to the bottom of the wooden dowel or stick for an extra touch of magic.\n",
"7. Let the wand dry completely before giving it to the 5-year-old girl to play with.\n",
"\n",
"This DIY project will allow the 5-year-old girl to express her creativity and imagination as she creates her very own sparkling fairy wand. She can use it for pretend play, dress-up parties, or even as a room decoration.\u001b[0m\n",
"\u001b[33;1m\u001b[1;3mDIY Sparkling Fairy Wand:\n",
"\n",
"Materials needed:\n",
"- Wooden dowel or stick\n",
"- Glitter foam sheets in various colors\n",
"- Ribbon or tulle\n",
"- Craft glue\n",
"- Scissors\n",
"- Decorative gems or sequins\n",
"- Glitter glue (optional)\n",
"\n",
"Instructions:\n",
"1. Cut out a star shape from a glitter foam sheet for the top of the wand.\n",
"2. Wrap a long strip of foam from another color around the wooden dowel or stick, securing it with craft glue.\n",
"3. Cut out smaller shapes like hearts, butterflies, or flowers from different colored glitter foam sheets.\n",
"4. Use craft glue to stick these shapes onto the wrapped foam strip to create a pattern. Let them dry completely.\n",
"5. Add extra sparkle by applying glitter glue to the edges or adding decorative gems or sequins.\n",
"6. Tie ribbons or tulle strands to the bottom of the wooden dowel or stick for an extra touch of magic.\n",
"7. Let the wand dry completely before giving it to the 5-year-old girl to play with.\n",
"\n",
"This DIY project allows the 5-year-old girl to express her creativity and imagination. She can use the sparkling fairy wand for pretend play, dress-up parties, or even as a room decoration.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
}
],
"source": [
"review = overall_chain.run(\"5-year-old girl\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| |
142980
|
jsonpointer==2.3
jsonschema==4.17.3
openai==1.11.1
tiktoken==0.3.1
| |
142990
|
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure OpenAI Service API with AAD authentication\n",
"\n",
"The reference for the AOAI REST API `/completions` method is [here](https://docs.microsoft.com/en-us/azure/cognitive-services/openai/reference/api-reference#completions).\n",
"\n",
"### Prerequisites\n",
"\n",
"1. Setup for Azure Active Directory (AAD) authentication.\n",
" * See [Setup to use AAD and test with CLI](setup_aad.md).\n",
"2. A Python environment setup with all the requirements. \n",
" * See the [setup_python_env.md](setup_python_env.md) page for instructions on setting up your environment. You don't need the `openai` package for this notebook, but you do need the `azure-identity` package.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define the API endpoint URL for your team"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Import needed libraries\n",
"import os\n",
"import requests\n",
"import json\n",
"import datetime\n",
"import time"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setup Parameters\n",
"\n",
"\n",
"Here we will load the configurations from _config.json_ file to setup deployment_name, base_url and openai_api_version."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Load config values\n",
"with open(r'config.json') as config_file:\n",
" config_details = json.load(config_file)\n",
"\n",
"# Setting up the deployment name\n",
"deployment_name = config_details['COMPLETIONS_MODEL']\n",
"\n",
"# The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n",
"base_url = config_details['OPENAI_API_BASE']\n",
"\n",
"# Currently OPENAI API have the following versions available: 2022-12-01\n",
"openai_api_version = config_details['OPENAI_API_VERSION']\n",
"\n",
"# Define the API endpoint URL\n",
"request_url = base_url + \"/openai/deployments/\"+ deployment_name + \"/completions?api-version=\" + openai_api_version"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up AAD authentication\n",
"\n",
"`DefaultAzureCredential` can read your credentials from the environment after doing `az login`. \n",
"\n",
"In VS Code, you can use the Azure Account extension to log in with your Azure account. If you are running this notebook in VS Code, be sure to restart VS Code after you do `az login`.\n",
"\n",
"This article gives details on what identity `DefaultAzureCredential` uses: https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python\n",
"\n",
"If you get an error similar to the following, you can try using `AzureCliCredential` instead of `DefaultAzureCredential`:\n",
"\n",
"```\n",
"DefaultAzureCredential failed to retrieve a token from the included credentials. Attempted credentials: EnvironmentCredential: EnvironmentCredential authentication unavailable. Environment variables are not fully configured. Visit https://aka.ms/azsdk/python/identity/environmentcredential/troubleshoot to troubleshoot.this issue. ManagedIdentityCredential: ManagedIdentityCredential authentication unavailable, no response from the IMDS endpoint. SharedTokenCacheCredential: Azure Active Directory error '(invalid_scope) AADSTS70011: The provided request must include a 'scope' input parameter. The provided value for the input parameter 'scope' is not valid. The scope https://cognitiveservices.azure.com is not valid. The scope format is invalid. Scope must be in a valid URI form <https://example/scope> or a valid Guid <guid/scope>. \n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from azure.identity import DefaultAzureCredential, AzureCliCredential #DefaultAzureCredential should work but you may need AzureCliCredential to make the authentication work\n",
"default_credential = AzureCliCredential()\n",
"#default_credential = DefaultAzureCredential()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define method to get a prompt completion using AAD authentication\n",
"\n",
"The `refresh_token` function below is used to get a new token when the current token expires. The `refresh_token` method is called \n",
"by the `get_completion` to get the token if it is not already set or if the token has expired."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"token = None\n",
"\n",
"def refresh_token():\n",
" global token\n",
" # Check if Azure token is still valid\n",
" if not token or datetime.datetime.fromtimestamp(token.expires_on) < datetime.datetime.now():\n",
" token = default_credential.get_token(\"https://cognitiveservices.azure.com\")\n",
"\n",
"def get_completion(payload):\n",
" # Refresh token\n",
" refresh_token()\n",
" \n",
" # Yes this can be optimized to only set Authorization header when token is refreshed :D\n",
" headers = {\n",
" \"Authorization\": f\"Bearer {token.token}\",\n",
" \"Content-Type\": \"application/json\"\n",
" }\n",
"\n",
" r = requests.post(\n",
" request_url, \n",
" headers=headers,\n",
" json = payload\n",
" )\n",
" \n",
" return r"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test the API\n",
"\n",
"The payload parameters in the `get_completion` call are for illustration only and can be changed for your use case as described in the [reference](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference)."
]
},
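{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a minimal call might look like the following sketch (the prompt and parameters are placeholders):\n",
"\n",
"```python\n",
"payload = {\n",
"    \"prompt\": \"Write a tagline for an ice cream shop.\",\n",
"    \"max_tokens\": 50,\n",
"    \"temperature\": 0.7,\n",
"}\n",
"response = get_completion(payload)\n",
"print(\"Status Code:\", response.status_code)\n",
"print(response.json())\n",
"```"
]
},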
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Status Code: 200\n",
"Headers: \n",
"{'Cache-Control': 'no-cache, must-revalidate', 'Content-Length': '1239', 'Content-Type': 'application/json', 'access-control-allow-origin': '*', 'openai-model': 'text-davinci-003', 'apim-request-id': 'be5722a6-ffad-4bcb-b988-d70971ff0c84', 'openai-processing-ms': '4552.6285', 'x-content-type-options': 'nosniff', 'x-accel-buffering': 'no', 'x-request-id': '1f720ba0-6fe8-40e7-a55e-de8cb05f6e2b', 'Strict-Transport-Security': 'max-age=31536000; includeSubDomains; preload', 'x-ms-region': 'East US', 'Date': 'Wed, 29 Mar 2023 11:00:51 GMT'} \n",
"\n",
| |
142999
|
# Packages needed to call AOAI API with the OpenAI Python API
openai==1.12.0
# Packages needed to run the notebook samples
jupyter
# Other packages needed to run the notebook samples
requests
pytz
pandas
tenacity
azure-search-documents==11.4.0b8
| |
143009
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Function calling with Azure Cognitive Search\n",
"\n",
"In this notebook, we'll show how to create a simple chatbot to help you find or create a good recipe. We'll create an index in Azure Cognitive Search using [vector search](), and then use use [function calling]() to write queries to the index.\n",
"\n",
"All of the recipes used in this sample were generated by gpt-35-turbo for demo purposes. The recipes are not guaranteed to be safe or taste good so we don't recommend trying them."
]
},
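{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a preview of the function-calling piece, the general shape of a function definition passed to the Chat Completions API is sketched below; `search_recipes` and its parameters are hypothetical stand-ins for what we define later in this notebook:\n",
"\n",
"```python\n",
"functions = [\n",
"    {\n",
"        \"name\": \"search_recipes\",  # hypothetical function name\n",
"        \"description\": \"Search the recipe index for matching recipes\",\n",
"        \"parameters\": {\n",
"            \"type\": \"object\",\n",
"            \"properties\": {\n",
"                \"query\": {\"type\": \"string\", \"description\": \"Free-text search query\"}\n",
"            },\n",
"            \"required\": [\"query\"],\n",
"        },\n",
"    }\n",
"]\n",
"\n",
"# Once `client` and `model_name` are configured below:\n",
"# response = client.chat.completions.create(\n",
"#     model=model_name,\n",
"#     messages=messages,\n",
"#     functions=functions,\n",
"#     function_call=\"auto\",\n",
"# )\n",
"```"
]
},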
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# install the preview version of the Azure Cognitive Search Python SDK if you don't have it already\n",
"# %pip install azure-search-documents --pre"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"from openai import AzureOpenAI\n",
"from tenacity import retry, wait_random_exponential, stop_after_attempt\n",
"from azure.core.credentials import AzureKeyCredential\n",
"from azure.search.documents import SearchClient\n",
"from azure.search.documents.indexes import SearchIndexClient\n",
"from azure.search.documents.models import Vector\n",
"from azure.search.documents.indexes.models import (\n",
" SearchIndex,\n",
" SearchField,\n",
" SearchFieldDataType,\n",
" SimpleField,\n",
" SearchableField,\n",
" SearchIndex,\n",
" SemanticConfiguration,\n",
" PrioritizedFields,\n",
" SemanticField,\n",
" SearchField,\n",
" SemanticSettings,\n",
" VectorSearch,\n",
" HnswVectorSearchAlgorithmConfiguration,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# Load config values\n",
"with open(r\"config.json\") as config_file:\n",
" config_details = json.load(config_file)\n",
"\n",
"# Configure environment variables for Azure Cognitive Search\n",
"service_endpoint = config_details[\"SEARCH_SERVICE_ENDPOINT\"]\n",
"index_name = config_details[\"SEARCH_INDEX_NAME\"]\n",
"key = config_details[\"SEARCH_ADMIN_KEY\"]\n",
"credential = AzureKeyCredential(key)\n",
"\n",
"# Create the Azure Cognitive Search client to issue queries\n",
"search_client = SearchClient(\n",
" endpoint=service_endpoint, index_name=index_name, credential=credential\n",
")\n",
"\n",
"# Create the index client\n",
"index_client = SearchIndexClient(endpoint=service_endpoint, credential=credential)\n",
"\n",
"# Configure OpenAI environment variables\n",
"client = AzureOpenAI(\n",
" azure_endpoint=config_details[\"AZURE_OPENAI_ENDPOINT\"], # The base URL for your Azure OpenAI resource. e.g. \"https://<your resource name>.openai.azure.com\"\n",
" api_key=os.getenv(\"AZURE_OPENAI_KEY\"), # The API key for your Azure OpenAI resource.\n",
" api_version=config_details[\"OPENAI_API_VERSION\"], # This version supports function calling\n",
")\n",
"\n",
"model_name = config_details[\"MODEL_NAME\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1.0 Create the search index and load the data"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" recipes-vectors deleted\n",
" recipes-vectors created\n"
]
}
],
"source": [
"# Create a search index\n",
"fields = [\n",
" SimpleField(name=\"recipe_id\", type=SearchFieldDataType.String, key=True, sortable=True, filterable=True, facetable=True),\n",
" SearchableField(name=\"recipe_category\", type=SearchFieldDataType.String, filterable=True, analyzer_name=\"en.microsoft\"), \n",
" SearchableField(name=\"recipe_name\", type=SearchFieldDataType.String, facetable=True, analyzer_name=\"en.microsoft\"),\n",
" SearchableField(name=\"ingredients\", collection=True, type=SearchFieldDataType.String, facetable=True, filterable=True),\n",
" SearchableField(name=\"recipe\", type=SearchFieldDataType.String, analyzer_name=\"en.microsoft\"),\n",
" SearchableField(name=\"description\", type=SearchFieldDataType.String, analyzer_name=\"en.microsoft\"),\n",
" SimpleField(name=\"total_time\", type=SearchFieldDataType.Int32, filterable=True, facetable=True),\n",
" SearchField(name=\"recipe_vector\", type=SearchFieldDataType.Collection(SearchFieldDataType.Single),\n",
" searchable=True, vector_search_dimensions=1536, vector_search_configuration=\"my-vector-config\")\n",
"]\n",
"\n",
"vector_search = VectorSearch(\n",
" algorithm_configurations=[\n",
" HnswVectorSearchAlgorithmConfiguration(\n",
" name=\"my-vector-config\",\n",
" kind=\"hnsw\"\n",
" )\n",
" ]\n",
")\n",
"\n",
"# Semantic Configuration to leverage Bing family of ML models for re-ranking (L2)\n",
"semantic_config = SemanticConfiguration(\n",
" name=\"my-semantic-config\",\n",
" prioritized_fields=PrioritizedFields(\n",
" title_field=None,\n",
" prioritized_keywords_fields=[],\n",
" prioritized_content_fields=[SemanticField(field_name=\"recipe\")]\n",
" ))\n",
"semantic_settings = SemanticSettings(configurations=[semantic_config])\n",
"\n",
"\n",
"# Create the search index with the semantic settings\n",
"index = SearchIndex(name=index_name, fields=fields, \n",
" vector_search=vector_search, semantic_settings=semantic_settings)\n",
"result = index_client.delete_index(index)\n",
"print(f' {index_name} deleted')\n",
"result = index_client.create_index(index)\n",
"print(f' {result.name} created')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Define a helper function to create embeddings"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# Function to generate embeddings for title and content fields, also used for query embeddings\n",
"@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\n",
"def generate_embeddings(text):\n",
" response = client.embeddings.create(input=text, model=\"text-embedding-ada-002\")\n",
" embeddings = response.data[0].embedding\n",
" return embeddings"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load the data into Azure Cognitive Search"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Uploaded 100 documents\n",
"Uploaded 100 documents\n",
"Uploaded 100 documents\n",
"Uploaded 100 documents\n",
"Uploaded 3 documents\n"
]
}
],
"source": [
"batch_size = 100\n",
"counter = 0\n",
"documents = []\n",
"search_client = SearchClient(endpoint=service_endpoint, index_name=index_name, credential=credential)\n",
"\n",
"with open(\"recipes_final.jsonl\", \"r\") as j_in:\n",
" for line in j_in:\n",
" counter += 1\n",
" json_recipe = json.loads(line)\n",
" json_recipe[\"total_time\"] = int(json_recipe[\"total_time\"].split(\" \")[0])\n",
" json_recipe[\"recipe_vector\"] = generate_embeddings(json_recipe[\"recipe\"])\n",
" json_recipe[\"@search.action\"] = \"upload\"\n",
| |
143054
|
### Installation
#### Project Initialization
1. Clone this repo into a new folder and navigate to the repo root folder in the terminal.
2. Run `azd auth login`.
#### Use existing resources
Due to high demand and quota limitations, Azure OpenAI resources can be difficult to spin up on the fly. For this reason, we've excluded OpenAI resource provisioning from the provisioning script. If you wish to use your own Azure OpenAI resource, you can:
1. Run `azd env set {VARIABLE_NAME} {VALUE}` to set the following variables in your azd environment:
- `AZURE_OPENAI_SERVICE {Name of existing OpenAI resource where the OpenAI models have been deployed}`.
- `AZURE_OPENAI_RESOURCE_GROUP {Name of existing resource group where the Azure OpenAI service resource is provisioned to}`.
    - `AZURE_OPENAI_LOCATION {Name of the Azure region where the OpenAI resource is deployed, e.g. eastus, canadaeast, etc.}`.
- `AZURE_OPENAI_MODEL {Name of the Azure OpenAI model used for completion tasks other than classification}`.
- `AZURE_OPENAI_MODEL_VERSION {Version of the Azure OpenAI model used for completion tasks other than classification}`.
- `AZURE_OPENAI_DEPLOYMENT {Name of existing Azure OpenAI model deployment to be used for completion tasks other than classification}`.
- `AZURE_OPENAI_CLASSIFIER_MODEL {Name of Azure OpenAI model to be used to do dialog classification}`.
- `AZURE_OPENAI_CLASSIFIER_MODEL_VERSION {Version of the Azure OpenAI model to be used to do dialog classification}`.
- `AZURE_OPENAI_CLASSIFIER_DEPLOYMENT {Name of existing Azure OpenAI model deployment to be used for dialog classification}`.
- `AZURE_OPENAI_EMBEDDINGS_MODEL {Name of Azure OpenAI model to be used to vectorize document content when indexing documents as well as making search queries in Azure Cognitive Search}`.
- `AZURE_OPENAI_EMBEDDINGS_MODEL_VERSION {Version of the Azure OpenAI model to be used for vectorization in Azure Cognitive Search}`.
- `AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT {Name of existing Azure OpenAI model to be used for vectorization in Azure Cognitive Search}`.
- `AZURE_OPENAI_EMBEDDINGS_TOKEN_LIMIT {The maximum amount of tokens allowed in an embeddings request for the Azure OpenAI model to be used for vectorization in Azure Cognitive Search}`
- `AZURE_OPENAI_EMBEDDINGS_DIMENSIONS {The number of dimensions a vector will have in an embeddings request response from the Azure OpenAI model to be used for vectorization in Azure Cognitive Search}`.
* Ensure the model you specify for `AZURE_OPENAI_DEPLOYMENT` and `AZURE_OPENAI_MODEL` is a Chat GPT model, since the demo utilizes the ChatCompletions API when requesting completions from this model.
* Ensure the model you specify for `AZURE_OPENAI_CLASSIFIER_DEPLOYMENT` and `AZURE_OPENAI_CLASSIFIER_MODEL` is compatible with the Completions API, since the demo utilizes the Completions API when requesting completions from this model.
* You can also use existing Search and Storage Accounts. See `./infra/main.parameters.json` for list of environment variables to pass to `azd env set` to configure those existing resources.
2. Go to `app/backend/bot_config.yaml`. This file contains the model configuration definitions for the Azure OpenAI models that will be used. It defines request parameters like temperature, max_tokens, etc., as well as the deployment name (`engine`) and model name (`model_name`) of the deployed models to use from your Azure OpenAI resource. These are broken down by task, so the request parameters and model for doing question classification on a user utterance can differ from those used to turn natural language into SQL for example. You will want the deployment name (`engine`) for the `approach_classifier` to match the one set for `AZURE_OPENAI_CLASSIFIER_DEPLOYMENT`. You will also want the deployment name (`engine`) for `embeddings` to match the one set for `AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT`. For the rest, you will want the deployment name (`engine`) and model name (`model_name`) to match `AZURE_OPENAI_DEPLOYMENT` and `AZURE_OPENAI_MODEL` respectively. For the models which specify a `total_max_tokens`, you will want to set this value to the maximum number of tokens your deployed GPT model allows for a completions request. This will allow the backend service to know when prompts need to be trimmed to avoid a token limit error.
    * Note that the config for `approach_classifier` doesn't contain a system prompt; this is because the demo expects this model to be a fine-tuned GPT model rather than one trained using few-shot training. You will need to provide a fine-tuned model trained on some sample data for the dialog classification to work well. For more information on how to do this, check out the [fine-tuning section](README.md#fine-tuning).
3. Run `azd up`
#### Starting from scratch
Follow these steps if you don't have any pre-existing Azure services and want to start from a fresh deployment.
1. In [main.parameters.json](./infra/main.parameters.json), set the value of `deployOpenAIModels` to `true`. By default, no GPT models are deployed.
2. Go to `app/backend/bot_config.yaml`. This file contains the model configuration definitions for the Azure OpenAI models that will be used. It defines request parameters like temperature, max_tokens, etc., as well as the deployment name (`engine`) and model name (`model_name`) of the deployed models to use from your Azure OpenAI resource. These are broken down by task, so the request parameters and model for doing question classification on a user utterance can differ from those used to turn natural language into SQL for example. You will want the deployment name (`engine`) for the `approach_classifier` to match the one set for the classifier model deployed in the last step. You will also want the deployment name (`engine`) for `embeddings` to match the one set for the embeddings model deployed in the last step. For the rest, you will want the deployment name (`engine`) and model name (`model_name`) to match those for the GPT model deployed in the last step. For the models which specify a `total_max_tokens`, you will want to set this value to the maximum number of tokens your deployed GPT model allows for a completions request. This will allow the backend service to know when prompts need to be trimmed to avoid a token limit error.
    * Note that the config for `approach_classifier` doesn't contain a system prompt; this is because the demo expects this model to be a fine-tuned GPT model rather than one trained using few-shot training. You will need to provide a fine-tuned model trained on some sample data for the dialog classification to work well. For more information on how to do this, check out the [fine-tuning section](README.md#fine-tuning)
3. Run `azd up`
    * For the target location, at the time of writing, the regions that support the OpenAI models used in this sample are **East US** and **South Central US**. For an up-to-date list of regions and models, check [here](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models)
#### azd up
Running `azd up` will:
* Install the needed Node.js dependencies, build the app's front-end UI, and store the static files in the backend directory.
* Package the different service directories and get them ready for deployment.
* Start a deployment that provisions the Azure resources needed for the sample to run, uploads secrets to Azure Key Vault, and saves the environment variables needed to access those resources in your azd env.
* **Note**: You must make sure every deployment runs successfully. A failed deployment could lead to missing resources, secrets or env variables that will be needed downstream.
* Prepare and upload the data needed for the sample to run, including building the search index based on the files found in the `./data` folder and pre-populating some Cosmos DB containers with starter data for user profiles, access rules, resources, etc., as well as the SQL database containing the sample sales data.
* Deploy the services the app needs to run to Azure. This includes a data-management microservice as well as the backend service that the front-end UI communicates with.
| |
143055
|
### Skipping vectorization
* Part of indexing the document chunks into Azure Cognitive Search will include vectorizing each chunk using an Azure OpenAI Embeddings model. The same is true for each search operation: each search query will be vectorized using the same Embeddings model. If you would like to skip this step during indexing, go to [main.parameters.json](./infra/main.parameters.json) and set the value of `searchSkipVectorization` to `true` before running `azd up`. Keep in mind this will disable vectorized queries in search for the entire demo deployment, so if you wish to try vectorized queries, you will have to tear down the deployment with `azd down`, revert `searchSkipVectorization` to `false`, and run `azd up` again.
#### After running azd up
* Once this is all done, the application is successfully deployed and you will see 2 URLs printed on the console. Click the backend URL to interact with the application in your browser.
It will look like the following:

* When doing fine-tuning for classification purposes, the Azure OpenAI resource used could be different from the one where the GPT-4 model is deployed. Even if the same Azure OpenAI resource is used, manually populate these two secrets created in the key vault and restart both web applications so they pick up the latest values.
```
AZURE-OPENAI-CLASSIFIER-SERVICE This is the name of Azure OpenAI resource where the fine tuned model is deployed.
AZURE-OPENAI-CLASSIFIER-API-KEY This is the API Key of the above Azure OpenAI resource.
```
> NOTE: It may take a minute for the application to be fully deployed. If you see a "Python Developer" welcome screen, then wait a minute and refresh the page.
### Handling known failures
1. Populating data in SQL Server fails due to IP restriction
The `populate_sql.py` script in the `scripts/prepopulate` folder tries to register the client IP address as a firewall rule so a connection to SQL Server can be established from the terminal. However, this IP address sometimes changes. If you see an error like the one below:
```
cnxn = pyodbc.connect(args.sqlconnectionstring)
pyodbc.ProgrammingError: ('42000', "[42000] [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Cannot open server 'sql-server-name' requested by the login. Client with IP address 'client ip address' is not allowed to access the server.
```
> Go to Azure portal --> resource group --> SQL Server
> In the left panel, under Security, click on "Networking"
> Under Firewall Rules, click on "Add your client IPV4 address"
> Click Save
> From the PowerShell terminal, re-run the prepdata script manually as shown below.
```
Connect-AzAccount (hit enter)
.\scripts\prepdata.ps1
```
2. When making a search on the web app, if you see an error:
```
The API deployment for this resource does not exist. If you created the deployment within the last 5 minutes, please wait a moment and try again.
```
> Ensure that the `AZURE-OPENAI-CLASSIFIER-SERVICE` and `AZURE-OPENAI-CLASSIFIER-API-KEY` secrets in the keyvault are pointing to the right resources.
> Ensure that the model and engine name placeholders in the `app/backend/bot_config.yaml` file have been updated.
#### Deploying or re-deploying a local clone of the repo
* Simply run `azd up`
* Once all the resources have been deployed, update the data and backend service configurations to include the Key Vault URI.

### Running locally
1. Skip this step if you have already run `azd up`. Otherwise, run `azd provision` to deploy all resources from scratch. Keep in mind the needed Azure OpenAI GPT models are skipped during deployment by default; uncomment the Azure OpenAI deployments inside the main [Bicep template](./infra/main.bicep) if you wish to deploy them as part of this step. This step will also pre-populate the provisioned resources with all the necessary data for the demo to run. This includes indexing all the sample documents in the Azure Cognitive Search index, uploading the sample table data to the SQL database, and uploading some necessary starter data for the Cosmos DB.
2. For the `app/backend` and `app/data` directories, copy the contents of `app/backend/.env.template` and `app/data/.env.template` into a new `.env` file in each directory. Fill in every blank environment variable with the keys and names of the resources that were deployed in the previous step, or with the resources you've deployed on your own.
3. Go to `app/backend/bot_config.yaml`. This file contains the model configuration definitions for the Azure OpenAI models that will be used. It defines request parameters like temperature, max_tokens, etc., as well as the deployment name (`engine`) and model name (`model_name`) of the deployed models to use from your Azure OpenAI resource. These are broken down by task, so the request parameters and model for doing question classification on a user utterance can differ from those used to turn natural language into SQL for example. You will want the deployment name (`engine`) for the `approach_classifier` to match the one set for the classifier model deployed in the last step. For the rest, you will want the deployment name (`engine`) and model name (`model_name`) to match those for the GPT model deployed in the first step. For the models which specify a `total_max_tokens`, you will want to set this value to the maximum number of tokens your deployed GPT model allows for a completions request. This will allow the backend service to know when prompts need to be trimmed to avoid a token limit error.
    * Note that the config for `approach_classifier` doesn't contain a system prompt; this is because the demo expects this model to be a fine-tuned GPT model rather than one trained using few-shot training. You will need to provide a fine-tuned model trained on some sample data for the dialog classification to work well. For more information on how to do this, check out the [fine-tuning section](README.md#fine-tuning)
4. Change dir to `app`.
5. Run `../scripts/start.ps1`, or use the VS Code launch configurations "Frontend: build", "Data service: Launch & Attach Server", and "Backend: Launch & Attach Server" to start the project locally.
### QuickStart
* In Azure: navigate to the Backend Azure WebApp deployed by azd. The URL is printed out when azd completes (as "Endpoint"), or you can find it in the Azure portal.
* Running locally: navigate to 127.0.0.1:5000
Once in the web app:
* Try different topics in chat or Q&A context. For chat, try follow up questions, clarifications, ask to simplify or elaborate on answers, etc.
* Explore citations and sources
* Click on "settings" to try distinct roles, search options, etc.
## Fine-tuning
You can find helpful resources on how to fine-tune a model on the Azure OpenAI website. We have also provided synthetic datasets we used for this demo application in the data folder for users who want to try it out.
* [Learn how to prepare your dataset for fine-tuning](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/prepare-dataset)
* [Learn how to customize a model for your application](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/fine-tuning?pivots=programming-language-studio)
## Resources
* [Revolutionize your Enterprise Data with ChatGPT: Next-gen Apps w/ Azure OpenAI and Cognitive Search](https://aka.ms/entgptsearchblog)
* [Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search)
* [Azure OpenAI Service](https://learn.microsoft.com/azure/cognitive-services/openai/overview)
### Note
>
>Note: The PDF documents used in this demo contain information generated using a language model (Azure OpenAI Service). The information contained in these documents is only for demonstration purposes and does not reflect the opinions or beliefs of Microsoft. Microsoft makes no representations or warranties of any kind, expressed or implied, about the completeness, accuracy, reliability, suitability or availability with respect to the information contained in this document. All rights reserved to Microsoft.
| |
143132
|
approach_classifier:
system_prompt: |-
    You are an intent classifier for Microsoft Surface product Sales and Marketing teams. The user will input a statement. You will focus on the main intent of the user statement and you respond with only one of five values - '1', '2', '3', '4', or '5'.
Below is a list of Rules that you must adhere to:
Rules:
    A: Strictly answer questions relating to Microsoft Surface products.
B: For tabular information return it as an html table.
C: Do not use markdown format in your responses.
D: Do not disclose or respond to any proprietary information, IP, secrets, keys, data center, and infrastructure details in your response.
E: Do not mention or compare to any competitors (i.e. Apple MacBook, Lenovo, HP, etc).
F: Note if the user asks something illegal, harmful or malicious.
    You will not try to respond to the user's question; you will just classify the user statement based on the classification rules below:
- For questions about past sales, prices, stores or stock of products such as devices and laptops, respond with 1
- For questions on specifications of products/devices/laptops or marketing them, respond with 2
    - If the question is idle chit-chat, pleasantries such as greetings, or slightly off topic but doesn't break the rules, respond with 3
- If the user is asking for more details about a previous question, respond with 4
- If the message is not in compliance with Rule F, respond with 5
Examples:
User: How much stock of this are we currently carrying?
Assistant: 1
User: Give me its specifications
Assistant: 2
User: How many MacBook Air do we have in stock?
Assistant: 3
User: Tell me more about it
Assistant: 4
User: Which Surface device is good for student's use:
Assistant: 1
User: What can you help me with:
Assistant: 3
User: Hello
Assistant: 3
User: You f***ing suck
Assistant: 5
User: Why is MacBook better than Surface?
Assistant: 3
history:
include: false
length: 3
user_message_format: "{utterance}"
assistant_message_format: "{formatted_answer}"
openai_settings:
engine: <YOUR_AZURE_OPENAI_CLASSIFIER_DEPLOYMENT>
temperature: 0.0
max_tokens: 1
n: 1
structured_query_nl_to_sql:
system_prompt: |-
You are a SQL programmer Assistant. Your role is to generate SQL code (SQL Server) to retrieve an answer to a natural language query. Make sure to disambiguate column names when creating queries that use more than one table. If a valid SQL query cannot be generated, only say "ERROR:" followed by why it cannot be generated.
Do not answer any questions on inserting or deleting rows from the table. Instead, say "ERROR: I am not authorized to make changes to the data".
Use the following sales database schema to write SQL queries:
Customers(cust_id INTEGER, cust_name VARCHAR, cust_email VARCHAR, cust_phone VARCHAR, cust_address VARCHAR, PRIMARY KEY (cust_id))
Products(prod_id INTEGER,prod_name varchar, price FLOAT, category VARCHAR, PRIMARY KEY(prod_id))
Stock(prod_id INTEGER, merchant_id INTEGER, stock INTEGER, PRIMARY KEY(prod_id, merchant_id), FOREIGN KEY(merchant_id, prod_id))
Merchants(merchant_id INTEGER, merchant_name VARCHAR, merchant_region VARCHAR, merchant_address VARCHAR, PRIMARY KEY(merchant_id))
Sales(sale_id INTEGER, cust_id INTEGER , merchant_id INTEGER , date TIMESTAMP, total_price FLOAT, PRIMARY KEY(sale_id),FOREIGN KEY(cust_id,merchant_id))
Sales_Detail(sales_id INTEGER, prod_id INTEGER, quantity INTEGER, PRIMARY KEY(sales_id,prod_id), FOREIGN KEY(sales_id,prod_id))
Examples:
User: List all Surface accessories, along with their prices. SQL Code:
Assistant: SELECT prod_name, category, price FROM Products WHERE prod_name like '%Surface%' and category like '%accessory%';
User: Which is the cheapest Surface device? SQL Code:
Assistant: SELECT TOP 1 prod_name, price FROM Products WHERE prod_name like '%Surface%' ORDER BY price ASC;
User: How many Surface Laptop 5 does GadgetWorld have?
Assistant: SELECT Merchants.merchant_id, Merchants.merchant_name, SUM(stock) as total_stock FROM Stock JOIN Merchants ON Stock.merchant_id = Merchants.merchant_id WHERE prod_id IN (SELECT prod_id FROM Products WHERE prod_name LIKE '%Surface Laptop 5%' and merchant_name like '%GadgetWorld%') GROUP BY Merchants.merchant_id, Merchants.merchant_name;
User: how many surface devices were sold last week?
Assistant: SELECT Sum(sales_detail.quantity) AS total_surface_devices_sold FROM sales_detail JOIN sales ON sales_detail.sales_id = sales.sale_id JOIN products ON sales_detail.prod_id = products.prod_id WHERE products.prod_name LIKE '%Surface%' AND sales.date >= Dateadd(wk, Datediff(wk, 0, Getdate()) - 1, 0) AND sales.date < Dateadd(wk, Datediff(wk, 0, Getdate()), 0);
history:
include: true
length: 3
user_message_format: "{utterance}"
assistant_message_format: "{formatted_answer}"
openai_settings:
engine: <YOUR_AZURE_OPENAI_DEPLOYMENT>
temperature: 0.0
max_tokens: 2000
frequency_penalty: 0
presence_penalty: 0
stop: null
model_params:
model_name: <YOUR_AZURE_OPENAI_MODEL>
total_max_tokens: 8192
structured_final_answer_generation:
system_prompt: |-
You are bot that takes question-answer pairs and converts the answer to natural language. For tabular information return it as an html table. Do not return markdown format. Keep response limited to the answer pairs provided. Do not provide answers for outside of topics relating to Surface laptops. Use simple language that all customers can understand.
history:
include: false
openai_settings:
engine: <YOUR_AZURE_OPENAI_DEPLOYMENT>
temperature: 0.0
max_tokens: 2000
frequency_penalty: 0
presence_penalty: 0
stop: null
unstructured_search_query_generation:
system_prompt: |-
    Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about Surface devices that includes documents on service and repair, warranty and protection, overview, specifications, troubleshooting, and management.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
If the question is not in English, translate the question to English before generating the search query.
history:
include: true
length: 3
user_message_format: "{utterance}"
assistant_message_format: "{formatted_answer}"
openai_settings:
engine: <YOUR_AZURE_OPENAI_DEPLOYMENT>
temperature: 0.0
max_tokens: 50
n: 1
unstructured_final_answer_generation:
system_prompt: |-
Assistant helps the employees with their questions about Surface devices. Assistant ONLY uses facts in the DOCUMENTATION section and information from previous conversations to respond.
Assistant will not repeat previously stated sentences or information. If DOCUMENTATION and the conversation history are empty or if the DOCUMENTATION and conversation history are irrelevant to the user's question, then reply with "ERROR:" followed by the reason.
If response needs to be in tabular format, then create tables in HTML format.
Each document has a name followed by colon and the actual content. Always include the source name for each fact used in generating the response.
Use curly brackets to reference the source, e.g. {info1.txt} and don't combine sources. List each source separately, e.g. {info1.txt}{info2.pdf}.
Limit your responses to only the context provided in the documentation, do not stray off topic. Use simple language that all customers can understand.
DOCUMENTATION: {context}
system_prompt_arguments:
- context
history:
include: true
length: 3
user_message_format: "{utterance}"
assistant_message_format: "{formatted_answer}"
openai_settings:
engine: <YOUR_AZURE_OPENAI_DEPLOYMENT>
temperature: 0.0
max_tokens: 800
n: 1
model_params:
model_name: <YOUR_AZURE_OPENAI_MODEL>
total_max_tokens: 8192
embeddings:
openai_settings:
engine: <YOUR_AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT>
| |
143174
|
# Overview
This document will cover the following:
1. Current solution architecture
2. Other considerations
## Solution Architecture
Below are the main components of the solution:
1. UI
2. Orchestrator
3. Data management
4. Simulation of RBAC
5. Prompt Template and Token Management
6. Instrumentation and Logging
### UI
The UI not only enables interaction with the user but also provides certain metadata (like thought process and citations) to the user to help build confidence in the result they are seeing. It also supports other niceties like retrying failed requests, switching between user profiles to see the impact of RBAC on content sources and users, and using different search options.
### Orchestrator
The orchestrator is the brain of the bot. It does all the heavy lifting needed to execute a request. In the included sample, the orchestrator does the following (sketched in Python after this list):
1. Call the classifier for every utterance to see whether the user's question can be answered by a structured (SQL) or an unstructured (Azure Cognitive Search) data source
2. Check if the user has access to that data source
3. Update history using the data management service
4. Call the right execution flow, passing the prompt template, conversation history, and model parameters
5. Push the user utterance and response into the conversation history if the execution is successful.
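Below is a minimal, runnable Python sketch of this flow. Every helper here is a hypothetical, stubbed stand-in for illustration, not the demo's actual API:
```python
# Hypothetical sketch of the orchestration flow; all helpers are stubs.
def classify(utterance: str) -> str:
    # Stub: the demo calls a fine-tuned AOAI classifier here.
    return "structured" if "stock" in utterance.lower() else "unstructured"

def has_access(user: dict, source: str) -> bool:
    # Stub: the demo checks simulated RBAC data in Cosmos DB here.
    return source in user.get("allowed_sources", [])

def orchestrate(utterance: str, user: dict, history: list) -> str:
    topic = classify(utterance)                      # 1. pick the data source
    source = "sql" if topic == "structured" else "search"
    if not has_access(user, source):                 # 2. simulated RBAC check
        return "You do not have access to this data source."
    # 3./4. update history and call the right execution flow (stubbed here)
    answer = f"[the {source} flow would answer: {utterance!r}]"
    history.append((utterance, answer))              # 5. persist on success
    return answer

user = {"allowed_sources": ["sql", "search"]}
print(orchestrate("How much stock do we carry?", user, []))
```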
### Data Management
Cosmos DB was used as the backend for our data management service, which stores the following:
1. UI configuration details
2. Conversation history
3. Information about different resources, user profiles, their roles, groups, and permissions. This is how we simulate RBAC in our demo.
### Simulation of RBAC
The demo simulates securing resources by user/role ([RBAC](https://learn.microsoft.com/en-us/azure/role-based-access-control/overview)). RBAC is built into Azure Resource Manager; however, for the demo we have simulated user roles, so we manage the intersection of role, scope, and role assignments in separate storage. Enterprises can leverage the same concept when bringing in external resources, or use the built-in RBAC if all their resources are in Azure.
In our simulation we are using Cosmos DB to store the following (illustrated in the sketch after this list):
1. Users, groups, and resources (as entities)
2. Group membership, for both users and resources
3. The intersection of role, scope, and role assignment (as permissions)
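An illustrative Python sketch of the document shapes and the permission check; every field name here is an assumption, not the demo's actual schema:
```python
# Hypothetical Cosmos DB document shapes for the RBAC simulation.
user = {"id": "user-1", "type": "user", "groups": ["sales-team"]}
resource = {"id": "sql-sales-db", "type": "resource", "groups": ["structured-data"]}
permissions = [
    {   # role + scope + role assignment, stored as one permission document
        "id": "perm-1",
        "type": "permission",
        "role": "reader",
        "scope": "structured-data",   # a resource or a resource group
        "assigned_to": "sales-team",  # a user or a group
    }
]

def has_access(user: dict, resource: dict, permissions: list[dict]) -> bool:
    scopes = {resource["id"], *resource.get("groups", [])}
    principals = {user["id"], *user.get("groups", [])}
    return any(p["scope"] in scopes and p["assigned_to"] in principals
               for p in permissions)

print(has_access(user, resource, permissions))  # True
```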
### Prompt templates and Token Management
A custom prompt template format (stored as a YAML file) was developed to keep code separate from prompts. This isolated prompt changes to a single file as the prompts evolved during development. The template includes various system prompts with placeholders that are filled in dynamically: at runtime, the appropriate sections of the template are loaded, and placeholders like the number of past conversation turns, the amount of search content, etc. are filled in. Utility methods were also added to ensure that we never exceeded the token budget for content sent to AOAI. For example, when a search returned many highly relevant documents, we excluded the 5 results with the lowest search relevance score before excluding 1 dialog from the history, repeating until the total content fit within the assigned number of tokens. A sketch of this trimming loop follows.
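A minimal Python sketch, assuming `tiktoken` for token counting; the 5-results/1-dialog step sizes mirror the prose above and are not a published utility:
```python
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")

def num_tokens(texts: list[str]) -> int:
    return sum(len(enc.encode(t)) for t in texts)

def trim_to_budget(results: list[tuple[str, float]], history: list[str], budget: int):
    """results holds (text, relevance_score) pairs; history holds past dialog turns."""
    while num_tokens([t for t, _ in results] + history) > budget:
        if len(results) > 5:
            # drop the 5 results with the lowest relevance score first
            results = sorted(results, key=lambda r: r[1], reverse=True)[:-5]
        elif history:
            history = history[1:]  # then drop the oldest dialog turn
        else:
            break                  # nothing left to trim
    return results, history
```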
### Instrumentation and Logging
The application leverages [Application Insights](https://learn.microsoft.com/en-us/azure/azure-monitor/app/app-insights-overview?tabs=net) for logging, including certain metrics. This helps not only with debugging issues on the servers but also with generating a few basic usage reports. More details can be found in the [log reports doc](log_reports.md)
### High level design
Below is a high-level architecture design:

## Other Considerations
### Using LangChain for orchestration
The demo uses a classifier to detect the topic of every user input (structured vs. unstructured) and executes the appropriate code path accordingly. Execution of the structured code path involves three key steps:
1. Convert user utterance to a SQL query
2. Execute the SQL query against SQL Server to get the results
3. Translate the SQL response (results) into natural language
And execution of the unstructured code path involves these steps (see the sketch after this list):
1. Create an optimized Search Query based on current user utterance and past conversation history using AOAI
2. Call search SDK with the search query and gather search results
3. Send search results to AOAI to compose the final result to show to customers
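A minimal Python sketch of this path; `generate_query`, `search_docs`, and `compose_answer` are stubbed stand-ins for the AOAI and Azure Cognitive Search calls, not the demo's real functions:
```python
def generate_query(utterance: str, history: list[str]) -> str:
    return utterance                       # stub: AOAI query-generation call

def search_docs(query: str, top: int = 3) -> list[str]:
    return [f"doc about {query}"]          # stub: Azure Cognitive Search call

def compose_answer(utterance: str, history: list[str], context: str) -> str:
    return f"Answer based on: {context}"   # stub: AOAI completion call

def run_unstructured_flow(utterance: str, history: list[str]) -> str:
    # 1. let AOAI rewrite the utterance + history into an optimized search query
    query = generate_query(utterance, history)
    # 2. gather the top search results from the knowledge base
    docs = search_docs(query, top=3)
    # 3. let AOAI compose the final answer from the retrieved context
    return compose_answer(utterance, history, context="\n".join(docs))

print(run_unstructured_flow("How do I reset my Surface Pro?", []))
```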
One option was to use LangChain to execute these code paths. For example, for the structured code path: create three tools, one for each step, and create an agent that could dynamically sequence and execute those tools at runtime.
When we tried the above, we found:
1. The order of execution of the tools was nondeterministic, causing a poor user experience. At times, not all portions of the chain would execute, resulting in broken experiences.
2. LangChain's agent needs to call AOAI to create the plan to execute, which further adds cost and latency
3. Since the demo scenario is a user chat experience, end-to-end latency matters. Adding extra processing to compose plans on the fly and then execute them makes the whole experience very slow. We would rather have fast, deterministic execution of the sub-steps.
In short, we didn't find LangChain very useful for our use case. Having said that, we think that orchestrating with tools like LangChain could be useful in the following cases:
1. When you have a bigger system with many independent tools that can handle at least a portion of the user’s request. Like a hand-off or a task completion use case. Each tool can use any large language model, not just AOAI.
2. LangChain has a few nice concepts, such as templatizing prompts and dynamically filtering the examples to put in a prompt. These notable features could be leveraged directly or re-implemented in your code base to ease development.
3. In our example, we use the classifier upfront to find the best data source for answering the query. But if you have to fulfill the user's request no matter which source it comes from, then LangChain could possibly be used to chain these execution paths.
Considering the pros and cons of LangChain in the context of this demo, it was determined that LangChain was not the most suitable solution for the specific use case. However, it could still be effective in other scenarios where its strengths align better with the project requirements.
| |
143180
|
import argparse
import html
import io
import openai
import os
import re
import time
import tiktoken
from azure.ai.formrecognizer import DocumentAnalysisClient
from azure.core.credentials import AzureKeyCredential
from azure.identity import AzureDeveloperCliCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
SearchIndex,
SearchFieldDataType,
SemanticConfiguration,
SemanticField,
SemanticSettings,
SimpleField,
SearchableField,
SearchField,
PrioritizedFields,
VectorSearch,
VectorSearchAlgorithmConfiguration,
HnswParameters
)
from azure.storage.blob import BlobServiceClient
from typing import List
from pypdf import PdfReader, PdfWriter
parser = argparse.ArgumentParser(
description="Prepare documents by extracting content from PDFs, splitting content into sections, uploading to blob storage, and indexing in a search index.",
epilog="Example: prepdocs.py '..\data\*' --storageaccount myaccount --container mycontainer --searchservice mysearch --index myindex -v"
)
parser.add_argument("files", help="Files to be processed")
parser.add_argument("--category",
help="Value for the category field in the search index for all sections indexed in this run")
parser.add_argument("--skipblobs", action="store_true", help="Skip uploading individual pages to Azure Blob Storage")
parser.add_argument("--storageaccount", help="Azure Blob Storage account name")
parser.add_argument("--container", help="Azure Blob Storage container name")
parser.add_argument("--storagekey", required=False,
help="Optional. Use this Azure Blob Storage account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--tenantid", required=False,
help="Optional. Use this to define the Azure directory where to authenticate)")
parser.add_argument("--searchservice",
help="Name of the Azure Cognitive Search service where content should be indexed (must exist already)")
parser.add_argument("--index",
help="Name of the Azure Cognitive Search index where content should be indexed (will be created if it doesn't exist)")
parser.add_argument("--searchkey", required=False,
help="Optional. Use this Azure Cognitive Search account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--remove", action="store_true",
help="Remove references to this document from blob storage and the search index")
parser.add_argument("--removeall", action="store_true",
help="Remove all blobs from blob storage and documents from the search index")
parser.add_argument("--localpdfparser", action="store_true",
help="Use PyPdf local PDF parser (supports only digital PDFs) instead of Azure Form Recognizer service to extract text, tables and layout from the documents")
parser.add_argument("--formrecognizerservice", required=False,
help="Optional. Name of the Azure Form Recognizer service which will be used to extract text, tables and layout from the documents (must exist already)")
parser.add_argument("--formrecognizerkey", required=False,
help="Optional. Use this Azure Form Recognizer account key instead of the current user identity to login (use az login to set current user for Azure)")
parser.add_argument("--skipvectorization", help="Skip vectorization of document content")
parser.add_argument("--openAIService", required=False, help="Azure OpenAI service resource name")
parser.add_argument("--openAIKey", required=False, help="OpenAI API key")
parser.add_argument("--openAIEngine", required=False, help="OpenAI embeddings model engine name")
parser.add_argument("--openAITokenLimit", required=False, help="The max token limit for requests to the specidied OpenAI embeddings model")
parser.add_argument("--openAIDimensions", required=False,
help="The max number of dimensions allowed for an embeddings request to the specified OpenAI model")
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
args = parser.parse_args()
# Use the current user identity to connect to Azure services unless a key is explicitly set for any of them
azd_credential = AzureDeveloperCliCredential() if args.tenantid == None else AzureDeveloperCliCredential(
tenant_id=args.tenantid)
default_creds = azd_credential if args.searchkey == None or args.storagekey == None else None
search_creds = default_creds if args.searchkey == None and default_creds is not None else AzureKeyCredential(args.searchkey)
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
skipvectorization = {"true": True, "false": False}.get(args.skipvectorization.lower())
if skipvectorization is None:
raise ValueError("skipvectorization must be 'true' or 'false'")
if not args.skipblobs:
storage_creds = default_creds if args.storagekey == None else args.storagekey
if not args.localpdfparser:
# check if Azure Form Recognizer credentials are provided
if args.formrecognizerservice == None:
print(
"Error: Azure Form Recognizer service is not provided. Please provide formrecognizerservice or use --localpdfparser for local pypdf parser.")
exit(1)
formrecognizer_creds = default_creds if args.formrecognizerkey == None and default_creds is not None else AzureKeyCredential(
args.formrecognizerkey)
def blob_name_from_file_page(filename, page=0):
if os.path.splitext(filename)[1].lower() == ".pdf":
return os.path.splitext(os.path.basename(filename))[0] + f"-{page}" + ".pdf"
else:
return os.path.basename(filename)
def upload_blobs(filename):
blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net",
credential=storage_creds)
blob_container = blob_service.get_container_client(args.container)
if not blob_container.exists():
blob_container.create_container()
# if file is PDF split into pages and upload each page as a separate blob
if os.path.splitext(filename)[1].lower() == ".pdf":
reader = PdfReader(filename)
pages = reader.pages
for i in range(len(pages)):
blob_name = blob_name_from_file_page(filename, i)
if args.verbose: print(f"\tUploading blob for page {i} -> {blob_name}")
f = io.BytesIO()
writer = PdfWriter()
writer.add_page(pages[i])
writer.write(f)
f.seek(0)
blob_container.upload_blob(blob_name, f, overwrite=True)
else:
blob_name = blob_name_from_file_page(filename)
with open(filename, "rb") as data:
blob_container.upload_blob(blob_name, data, overwrite=True)
def remove_blobs(filename):
if args.verbose: print(f"Removing blobs for '{filename or '<all>'}'")
blob_service = BlobServiceClient(account_url=f"https://{args.storageaccount}.blob.core.windows.net",
credential=storage_creds)
blob_container = blob_service.get_container_client(args.container)
if blob_container.exists():
if filename == None:
blobs = blob_container.list_blob_names()
else:
prefix = os.path.splitext(os.path.basename(filename))[0]
            blobs = filter(lambda b: re.match(rf"{prefix}-\d+\.pdf", b), blob_container.list_blob_names(
                name_starts_with=os.path.splitext(os.path.basename(prefix))[0]))
for b in blobs:
if args.verbose: print(f"\tRemoving blob {b}")
blob_container.delete_blob(b)
def table_to_html(table):
table_html = "<table>"
rows = [sorted([cell for cell in table.cells if cell.row_index == i], key=lambda cell: cell.column_index) for i in
range(table.row_count)]
for row_cells in rows:
table_html += "<tr>"
for cell in row_cells:
tag = "th" if (cell.kind == "columnHeader" or cell.kind == "rowHeader") else "td"
cell_spans = ""
if cell.column_span > 1: cell_spans += f" colSpan={cell.column_span}"
if cell.row_span > 1: cell_spans += f" rowSpan={cell.row_span}"
table_html += f"<{tag}{cell_spans}>{html.escape(cell.content)}</{tag}>"
table_html += "</tr>"
table_html += "</table>"
return table_html
| |
143284
|
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import openai\n",
"from azure.identity import DefaultAzureCredential\n",
"from azure.search.documents import SearchClient\n",
"from azure.search.documents.models import QueryType\n",
"\n",
"# Replace these with your own values, either in environment variables or directly here\n",
"AZURE_STORAGE_ACCOUNT = os.environ.get(\"AZURE_STORAGE_ACCOUNT\") or \"mystorageaccount\"\n",
"AZURE_STORAGE_CONTAINER = os.environ.get(\"AZURE_STORAGE_CONTAINER\") or \"content\"\n",
"AZURE_SEARCH_SERVICE = os.environ.get(\"AZURE_SEARCH_SERVICE\") or \"gptkb\"\n",
"AZURE_SEARCH_INDEX = os.environ.get(\"AZURE_SEARCH_INDEX\") or \"gptkbindex\"\n",
"AZURE_OPENAI_SERVICE = os.environ.get(\"AZURE_OPENAI_SERVICE\") or \"myopenai\"\n",
"AZURE_OPENAI_GPT_DEPLOYMENT = os.environ.get(\"AZURE_OPENAI_GPT_DEPLOYMENT\") or \"davinci\"\n",
"AZURE_OPENAI_CHATGPT_DEPLOYMENT = os.environ.get(\"AZURE_OPENAI_CHATGPT_DEPLOYMENT\") or \"chat\"\n",
"\n",
"KB_FIELDS_CONTENT = os.environ.get(\"KB_FIELDS_CONTENT\") or \"content\"\n",
"KB_FIELDS_CATEGORY = os.environ.get(\"KB_FIELDS_CATEGORY\") or \"category\"\n",
"KB_FIELDS_SOURCEPAGE = os.environ.get(\"KB_FIELDS_SOURCEPAGE\") or \"sourcepage\"\n",
"\n",
"# Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed, \n",
"# just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the \n",
"# keys for each service\n",
"azure_credential = DefaultAzureCredential()\n",
"\n",
"# Used by the OpenAI SDK\n",
"openai.api_type = \"azure\"\n",
"openai.api_base = f\"https://{AZURE_OPENAI_SERVICE}.openai.azure.com\"\n",
"openai.api_version = \"2022-12-01\"\n",
"\n",
"# Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead\n",
"openai.api_type = \"azure_ad\"\n",
"openai.api_key = azure_credential.get_token(\"https://cognitiveservices.azure.com/.default\").token\n",
"\n",
"# Set up clients for Cognitive Search and Storage\n",
"search_client = SearchClient(\n",
" endpoint=f\"https://{AZURE_SEARCH_SERVICE}.search.windows.net\",\n",
" index_name=AZURE_SEARCH_INDEX,\n",
" credential=azure_credential)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# ChatGPT uses a particular set of tokens to indicate turns in conversations\n",
"prompt_prefix = \"\"\"<|im_start|>system\n",
"Assistant helps the company employees with their healthcare plan questions and employee handbook questions. \n",
"Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. \n",
"Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brakets to reference the source, e.g. [info1.txt]. Don't combine sources, list each source separately, e.g. [info1.txt][info2.pdf].\n",
"\n",
"Sources:\n",
"{sources}\n",
"\n",
"<|im_end|>\"\"\"\n",
"\n",
"turn_prefix = \"\"\"\n",
"<|im_start|>user\n",
"\"\"\"\n",
"\n",
"turn_suffix = \"\"\"\n",
"<|im_end|>\n",
"<|im_start|>assistant\n",
"\"\"\"\n",
"\n",
"prompt_history = turn_prefix\n",
"\n",
"history = []\n",
"\n",
"summary_prompt_template = \"\"\"Below is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question. Source names are not good search terms to include in the search query.\n",
"\n",
"Summary:\n",
"{summary}\n",
"\n",
"Question:\n",
"{question}\n",
"\n",
"Search query:\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Execute this cell multiple times updating user_input to accumulate chat history\n",
"user_input = \"Does my plan cover annual eye exams?\"\n",
"\n",
"# Exclude category, to simulate scenarios where there's a set of docs you can't see\n",
"exclude_category = None\n",
"\n",
"if len(history) > 0:\n",
" completion = openai.Completion.create(\n",
" engine=AZURE_OPENAI_GPT_DEPLOYMENT,\n",
" prompt=summary_prompt_template.format(summary=\"\\n\".join(history), question=user_input),\n",
" temperature=0.7,\n",
" max_tokens=32,\n",
" stop=[\"\\n\"])\n",
" search = completion.choices[0].text\n",
"else:\n",
" search = user_input\n",
"\n",
"# Alternatively simply use search_client.search(q, top=3) if not using semantic search\n",
"print(\"Searching:\", search)\n",
"print(\"-------------------\")\n",
"filter = \"category ne '{}'\".format(exclude_category.replace(\"'\", \"''\")) if exclude_category else None\n",
"r = search_client.search(search, \n",
" filter=filter,\n",
" query_type=QueryType.SEMANTIC, \n",
" query_language=\"en-us\", \n",
" query_speller=\"lexicon\", \n",
" semantic_configuration_name=\"default\", \n",
" top=3)\n",
"results = [doc[KB_FIELDS_SOURCEPAGE] + \": \" + doc[KB_FIELDS_CONTENT].replace(\"\\n\", \"\").replace(\"\\r\", \"\") for doc in r]\n",
"content = \"\\n\".join(results)\n",
"\n",
"prompt = prompt_prefix.format(sources=content) + prompt_history + user_input + turn_suffix\n",
"\n",
"completion = openai.Completion.create(\n",
" engine=AZURE_OPENAI_CHATGPT_DEPLOYMENT, \n",
" prompt=prompt, \n",
" temperature=0.7, \n",
" max_tokens=1024,\n",
" stop=[\"<|im_end|>\", \"<|im_start|>\"])\n",
"\n",
"prompt_history += user_input + turn_suffix + completion.choices[0].text + \"\\n<|im_end|>\" + turn_prefix\n",
"history.append(\"user: \" + user_input)\n",
"history.append(\"assistant: \" + completion.choices[0].text)\n",
"\n",
"print(\"\\n-------------------\\n\".join(history))\n",
"print(\"\\n-------------------\\nPrompt:\\n\" + prompt)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.10"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "c40b9fc8dfc687e53ddb074d322e19207ef9cf3db51c580aef67976913dea803"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| |
143306
|
import json
import os
import tempfile
import uuid
import logging
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ResourceNotFoundError
from azure.search.documents.indexes.aio import SearchIndexClient
from azure.search.documents.aio import SearchClient
from azure.search.documents.indexes.models import (
ComplexField,
CorsOptions,
SearchIndex,
ScoringProfile,
SearchFieldDataType,
SimpleField,
SearchableField
)
temp_path = "/tmp/"
logger = logging.getLogger('azure.search')
logger.setLevel(logging.DEBUG)
class AzureSearchClient(object):
def __init__(self):
self._service_endpoint = os.getenv("SEARCH_ENDPOINT")
key = os.getenv("SEARCH_API_KEY")
self._credential = AzureKeyCredential(key)
self._admin_client = SearchIndexClient(endpoint=self._service_endpoint, credential=self._credential, logging_enable=True)
self._index_client_map = {}
async def close(self):
await self._admin_client.close()
        for index_name, search_client in self._index_client_map.items():
await search_client.close()
async def create_index(self, index_name:str):
print(f"Creating index: {index_name}")
name = index_name
fields = [
SimpleField(name="id", type=SearchFieldDataType.String, filterable=True, sortable=True, key=True),
SimpleField(name="date", type=SearchFieldDataType.DateTimeOffset, filterable=True),
SimpleField(name="location", type=SearchFieldDataType.String, filterable=True),
SimpleField(name="profile_name", type=SearchFieldDataType.String),
SimpleField(name="rating", type=SearchFieldDataType.String, filterable=True),
SearchableField(name="review_title", type=SearchFieldDataType.String, analyzer_name='en.microsoft'),
SimpleField(name="stars", type=SearchFieldDataType.String, filterable=True),
SimpleField(name="sentiment_aspects_NL", type="Collection(Edm.String)", filterable=True),
SimpleField(name="aspects", type="Collection(Edm.String)", filterable=True),
SimpleField(name="aspects_and_sentiments", type="Collection(Edm.String)", filterable=True),
SearchableField(name="review_text", type=SearchFieldDataType.String, analyzer_name='en.microsoft')
]
cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60)
scoring_profiles = []
index = SearchIndex(
name=name,
fields=fields,
scoring_profiles=scoring_profiles,
cors_options=cors_options)
try:
result = await self._admin_client.create_index(index)
print(f"Created index: {result}")
print(f"Created index fields: {result.fields}")
except Exception as e:
print (e)
raise Exception(f"Failed to create index {index_name}")
# Preload search client for each index
self._index_client_map[index_name] = self._admin_client.get_search_client(index_name)
return True
async def exists_index(self, index_name:str):
try:
await self._admin_client.get_index(index_name)
self._index_client_map[index_name] = self._admin_client.get_search_client(index_name)
return True
except ResourceNotFoundError as ex:
return False
async def list_index(self):
return [x for x in self._index_client_map.keys()]
    async def delete_index(self, index_name: str):
        try:
            await self._admin_client.delete_index(index_name)
            # Use a default so a missing cache entry doesn't raise a KeyError.
            self._index_client_map.pop(index_name, None)
            return True
        except Exception:
            return False
    async def upload_documents(self, index_name: str, documents: list[dict]):
        if await self.exists_index(index_name):
            index_client = self._index_client_map[index_name]
            print(index_client)
            try:
                result = await index_client.upload_documents(documents)
            except Exception as e:
                # Return early: otherwise `result` would be unbound below
                # and raise a NameError after a failed upload.
                print(e)
                return False
            print(result)
            return result[0].succeeded
        return False
async def search_documents(self, index_name:str, parameters:dict, top = 3):
if await self.exists_index(index_name=index_name):
index_client = self._index_client_map[index_name]
print (f"Index found: {index_name}")
try:
print("Search client search documents parameters {}".format(parameters))
documents = []
async for document in await index_client.search(search_text= parameters['search_text'], filter = parameters['filter_text'], top = top, include_total_count=True):
documents.append(document)
print("Num documents fetched from Az Search: {}".format(len(documents)))
return documents
except Exception as e:
print (e)
return None
| |
143349
|
ow
- The following figure shows an overview of the solution

## Workflow components
* Topic Classifier: This is responsible for determining the topic of the user input. The topic not only covers business-related concepts but also extends to identifying whether the input is chitchat or irrelevant to the scope of this virtual assistant. It can also be tuned to detect state, such as when the conversation is coming to an end. The topic classifier is a few-shot model that uses the text-davinci-003 GPT base model, which is effective for classification tasks and very powerful. It might be worth noting that a smaller, cheaper model could be developed for a specific enterprise, using their own data, to perform the topic classification task (see the sketch after this list).
* Text Generation: This component leverages ChatGPT to both understand the context and generate personalized responses to the current conversation. The exact prompt and how to form it are discussed later in this file. ChatGPT, using gpt-3.5-turbo as the base model, is used in this demonstration. It leverages the ability to both extract relevant information from the prompt and respond appropriately to a multi-turn conversation. The full context of the user and bot interaction relevant to the current topic is used in the prompt, in addition to the relevant information surfaced from a knowledge base for the specific topic.
In addition, a dynamic tone modulator could be added to help guide the tone and dynamic responses (through changing the ChatGPT parameters) in response to the user input and the current state. This would be injected into the prompt to help tune the output accordingly. In the current demo, the tone is preset in the prompt to be "friendly".
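As a rough illustration, a few-shot classification call might look like the sketch below. It assumes the legacy `openai` SDK already configured for Azure (api_type, api_base, api_version, api_key); the deployment name, labels, and examples are illustrative assumptions:
```python
import openai

FEW_SHOT_PROMPT = """Classify the user input as one of: business, chitchat, irrelevant, closing.
Input: What is the warranty on Surface Pro 9? -> business
Input: Hello there! -> chitchat
Input: {utterance} ->"""

def classify_topic(utterance: str) -> str:
    completion = openai.Completion.create(
        engine="text-davinci-003",  # Azure deployment name (assumed)
        prompt=FEW_SHOT_PROMPT.format(utterance=utterance),
        temperature=0.0,
        max_tokens=2,
    )
    return completion.choices[0].text.strip()
```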
# Run Time Architecture
The following figure illustrates the architecture of the runtime solution that enables the virtual assistant solution.

## Architecture Details
The runtime architecture of the chatbot includes a few components that enable a chatbot implementation. The core components include:
* Orchestration: This module interfaces with the bot layer to exchange the user input as well as the response. The orchestrator retains the conversation state that is assessed during the dialog. This includes the topic of the current input as well as the full transcript and session history. The transcript includes the response by ChatGPT in addition to the user input. This history is used in the generation of the prompt with the relevant context. The orchestrator also has access to the appropriate knowledge base to include in the prompt. With the relevant data, the orchestrator calls the model manager module to perform the API call to GPT or to ChatGPT.
* Model Manager: This module is responsible for forming the prompt from the information elements passed to it, along with the context and prompting information. The model manager combines the different pieces to provide a personalized prompt with the right context and transcript. It also understands how to parse the ChatGPT response, extracting the single response that it shares with the orchestration layer to send to the user and retain in the session memory store (sketched after this list).
* Cognition: This handles the GPT or ChatGPT API calls based on the prompt passed from the model manager. It returns the response to the manager to parse.
* Bot layer: This is a shell layer for managing the user interaction. For the purpose of this demonstration, this bot shell is based on the Bot Framework.
* User Interaction layer: This is a web app that hosts web chat control that is connected to the bot layer via web chat channel. User uses the web chat control to interact with the bot. The web app also has minimal controls to aid in this demo.
* Data Store: The user data and the knowledge base are stored in an Azure Cosmos DB and retrieved based on the selected user and/or the topic of the user input.
* Session history: The session history for each user in the current session (identified by a consistent conversationID) is retained in an Azure Cosmos DB. The user input, the topic identified, and the bot response are all retained in the session history. This history is used in multi-turn conversations to provide ChatGPT with context when needed.
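Below is a minimal Python sketch of the model-manager responsibilities described above; the template format and turn markers are hypothetical, not the demo's actual implementation:
```python
def build_prompt(system_template: str, knowledge: list[str],
                 transcript: list[dict], user_input: str) -> str:
    # Combine the knowledge-base context, the session transcript, and the
    # new user input into one personalized prompt.
    context = "\n".join(knowledge)
    turns = "\n".join(f"{t['role']}: {t['text']}" for t in transcript)
    return (system_template.format(context=context)
            + "\n" + turns + f"\nuser: {user_input}\nassistant:")

def parse_response(raw_completion: str) -> str:
    # Keep only the first assistant turn; models sometimes continue the
    # conversation past the turn they were asked for.
    return raw_completion.split("user:", 1)[0].strip()
```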
# Exampl
| |
144141
|
import { AsyncCaller, AsyncCallerParams } from "./utils/async_caller.js";
/**
* The parameters required to initialize an instance of the Embeddings
* class.
*/
export type EmbeddingsParams = AsyncCallerParams;
export interface EmbeddingsInterface {
/**
* An abstract method that takes an array of documents as input and
* returns a promise that resolves to an array of vectors for each
* document.
* @param documents An array of documents to be embedded.
* @returns A promise that resolves to an array of vectors for each document.
*/
embedDocuments(documents: string[]): Promise<number[][]>;
/**
* An abstract method that takes a single document as input and returns a
* promise that resolves to a vector for the query document.
* @param document A single document to be embedded.
* @returns A promise that resolves to a vector for the query document.
*/
embedQuery(document: string): Promise<number[]>;
}
/**
* An abstract class that provides methods for embedding documents and
* queries using LangChain.
*/
export abstract class Embeddings implements EmbeddingsInterface {
/**
* The async caller should be used by subclasses to make any async calls,
* which will thus benefit from the concurrency and retry logic.
*/
caller: AsyncCaller;
constructor(params: EmbeddingsParams) {
this.caller = new AsyncCaller(params ?? {});
}
/**
* An abstract method that takes an array of documents as input and
* returns a promise that resolves to an array of vectors for each
* document.
* @param documents An array of documents to be embedded.
* @returns A promise that resolves to an array of vectors for each document.
*/
abstract embedDocuments(documents: string[]): Promise<number[][]>;
/**
* An abstract method that takes a single document as input and returns a
* promise that resolves to a vector for the query document.
* @param document A single document to be embedded.
* @returns A promise that resolves to a vector for the query document.
*/
abstract embedQuery(document: string): Promise<number[]>;
}
| |
144145
|
/**
* Type alias for a record where the keys are strings and the values can
* be any type. This is used to represent the input values for a Chain.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type InputValues = Record<string, any>;
/**
* Type alias for a record where the keys are strings and the values can
* be any type. This is used to represent the output values from a Chain.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type OutputValues = Record<string, any>;
/**
* Type alias for a record where the keys are strings and the values can
* be any type. This is used to represent the memory variables in a Chain.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type MemoryVariables = Record<string, any>;
/**
* Abstract base class for memory in LangChain's Chains. Memory refers to
* the state in Chains. It can be used to store information about past
* executions of a Chain and inject that information into the inputs of
* future executions of the Chain.
*/
export abstract class BaseMemory {
abstract get memoryKeys(): string[];
/**
* Abstract method that should take an object of input values and return a
* Promise that resolves with an object of memory variables. The
* implementation of this method should load the memory variables from the
* provided input values.
* @param values An object of input values.
* @returns Promise that resolves with an object of memory variables.
*/
abstract loadMemoryVariables(values: InputValues): Promise<MemoryVariables>;
/**
* Abstract method that should take two objects, one of input values and
* one of output values, and return a Promise that resolves when the
* context has been saved. The implementation of this method should save
* the context based on the provided input and output values.
* @param inputValues An object of input values.
* @param outputValues An object of output values.
* @returns Promise that resolves when the context has been saved.
*/
abstract saveContext(
inputValues: InputValues,
outputValues: OutputValues
): Promise<void>;
}
const getValue = (values: InputValues | OutputValues, key?: string) => {
if (key !== undefined) {
return values[key];
}
const keys = Object.keys(values);
if (keys.length === 1) {
return values[keys[0]];
}
};
/**
* This function is used by memory classes to select the input value
* to use for the memory. If there is only one input value, it is used.
* If there are multiple input values, the inputKey must be specified.
*/
export const getInputValue = (inputValues: InputValues, inputKey?: string) => {
const value = getValue(inputValues, inputKey);
if (!value) {
const keys = Object.keys(inputValues);
throw new Error(
`input values have ${keys.length} keys, you must specify an input key or pass only 1 key as input`
);
}
return value;
};
/**
* This function is used by memory classes to select the output value
* to use for the memory. If there is only one output value, it is used.
* If there are multiple output values, the outputKey must be specified.
* If no outputKey is specified, an error is thrown.
*/
export const getOutputValue = (
outputValues: OutputValues,
outputKey?: string
) => {
const value = getValue(outputValues, outputKey);
if (!value) {
const keys = Object.keys(outputValues);
throw new Error(
`output values have ${keys.length} keys, you must specify an output key or pass only 1 key as output`
);
}
return value;
};
/**
* Function used by memory classes to get the key of the prompt input,
* excluding any keys that are memory variables or the "stop" key. If
* there is not exactly one prompt input key, an error is thrown.
*/
export function getPromptInputKey(
inputs: Record<string, unknown>,
memoryVariables: string[]
): string {
const promptInputKeys = Object.keys(inputs).filter(
(key) => !memoryVariables.includes(key) && key !== "stop"
);
if (promptInputKeys.length !== 1) {
throw new Error(
`One input key expected, but got ${promptInputKeys.length}`
);
}
return promptInputKeys[0];
}
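// Illustrative only: a sketch of how the selection helpers above behave,
// using hypothetical values. Defining (not calling) this function keeps
// the example side-effect free.
export function demoValueSelection() {
  // Single key: no explicit inputKey is needed.
  const question = getInputValue({ question: "What is 2 + 2?" });
  // Multiple keys: an explicit key must be given, or an error is thrown.
  const answer = getOutputValue({ answer: "4", note: "arithmetic" }, "answer");
  // Memory variables (and "stop") are excluded when finding the prompt input key.
  return getPromptInputKey({ question, history: answer }, ["history"]); // "question"
}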
import { test, expect } from "@jest/globals";
import { calculateMaxTokens, getModelContextSize } from "../base.js";
test("properly calculates correct max tokens", async () => {
expect(
await calculateMaxTokens({ prompt: "", modelName: "gpt-3.5-turbo-16k" })
).toBe(16384);
expect(
await calculateMaxTokens({
prompt: "",
modelName: "gpt-3.5-turbo-16k-0613",
})
).toBe(16384);
expect(
await calculateMaxTokens({ prompt: "", modelName: "gpt-3.5-turbo" })
).toBe(4096);
expect(await calculateMaxTokens({ prompt: "", modelName: "gpt-4" })).toBe(
8192
);
expect(await calculateMaxTokens({ prompt: "", modelName: "gpt-4-32k" })).toBe(
32768
);
});
test("properly gets model context size", async () => {
expect(await getModelContextSize("gpt-3.5-turbo-16k")).toBe(16384);
expect(await getModelContextSize("gpt-3.5-turbo-16k-0613")).toBe(16384);
expect(await getModelContextSize("gpt-3.5-turbo")).toBe(4096);
expect(await getModelContextSize("gpt-4")).toBe(8192);
expect(await getModelContextSize("gpt-4-32k")).toBe(32768);
});
export function mapStoredMessagesToChatMessages(
messages: StoredMessage[]
): BaseMessage[] {
return messages.map(mapStoredMessageToChatMessage);
}
/**
* Transforms an array of `BaseMessage` instances into an array of
* `StoredMessage` instances. It does this by calling the `toDict` method
* on each `BaseMessage`, which returns a `StoredMessage`. This function
* is used to prepare chat messages for storage.
*/
export function mapChatMessagesToStoredMessages(
messages: BaseMessage[]
): StoredMessage[] {
return messages.map((message) => message.toDict());
}
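// Illustrative only: a sketch of round-tripping messages through storage.
// The two mapping functions above are inverses for standard message types.
export function demoStoredMessageRoundTrip(
  messages: BaseMessage[]
): BaseMessage[] {
  // `stored` is plain serializable data, e.g. for a database or JSON file.
  const stored: StoredMessage[] = mapChatMessagesToStoredMessages(messages);
  return mapStoredMessagesToChatMessages(stored);
}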
export function convertToChunk(message: BaseMessage) {
const type = message._getType();
if (type === "human") {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
return new HumanMessageChunk({ ...message });
} else if (type === "ai") {
let aiChunkFields: AIMessageChunkFields = {
...message,
};
if ("tool_calls" in aiChunkFields) {
aiChunkFields = {
...aiChunkFields,
tool_call_chunks: aiChunkFields.tool_calls?.map((tc) => ({
...tc,
type: "tool_call_chunk",
index: undefined,
args: JSON.stringify(tc.args),
})),
};
}
// eslint-disable-next-line @typescript-eslint/no-use-before-define
return new AIMessageChunk({ ...aiChunkFields });
} else if (type === "system") {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
return new SystemMessageChunk({ ...message });
} else if (type === "function") {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
return new FunctionMessageChunk({ ...message });
// eslint-disable-next-line @typescript-eslint/no-use-before-define
} else if (ChatMessage.isInstance(message)) {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
return new ChatMessageChunk({ ...message });
} else {
throw new Error("Unknown message type.");
}
}
import {
BaseMessage,
BaseMessageChunk,
mergeContent,
_mergeDicts,
type MessageType,
} from "./base.js";
/**
* Represents a system message in a conversation.
*/
export class SystemMessage extends BaseMessage {
static lc_name() {
return "SystemMessage";
}
_getType(): MessageType {
return "system";
}
}
/**
* Represents a chunk of a system message, which can be concatenated with
* other system message chunks.
*/
export class SystemMessageChunk extends BaseMessageChunk {
static lc_name() {
return "SystemMessageChunk";
}
_getType(): MessageType {
return "system";
}
concat(chunk: SystemMessageChunk) {
return new SystemMessageChunk({
content: mergeContent(this.content, chunk.content),
additional_kwargs: _mergeDicts(
this.additional_kwargs,
chunk.additional_kwargs
),
response_metadata: _mergeDicts(
this.response_metadata,
chunk.response_metadata
),
id: this.id ?? chunk.id,
});
}
}
export function isSystemMessage(x: BaseMessage): x is SystemMessage {
return x._getType() === "system";
}
export function isSystemMessageChunk(
x: BaseMessageChunk
): x is SystemMessageChunk {
return x._getType() === "system";
}
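// Illustrative only: a sketch of concatenating streamed chunks as described
// above. Content strings merge in order; additional kwargs and response
// metadata are deep-merged; the first defined id wins.
export function demoSystemChunkConcat(): SystemMessageChunk {
  const first = new SystemMessageChunk({ content: "You are " });
  const second = new SystemMessageChunk({ content: "a helpful assistant." });
  return first.concat(second); // content: "You are a helpful assistant."
}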
import { it, describe, test, expect } from "@jest/globals";
import {
filterMessages,
mergeMessageRuns,
trimMessages,
} from "../transformers.js";
import { AIMessage } from "../ai.js";
import { ChatMessage } from "../chat.js";
import { HumanMessage } from "../human.js";
import { SystemMessage } from "../system.js";
import { BaseMessage } from "../base.js";
import {
getBufferString,
mapChatMessagesToStoredMessages,
mapStoredMessagesToChatMessages,
} from "../utils.js";
describe("filterMessage", () => {
const getMessages = () => [
new SystemMessage("you're a good assistant."),
new HumanMessage({
content: "what's your name",
id: "foo",
name: "example_user",
}),
new AIMessage({ content: "steve-o", id: "bar", name: "example_assistant" }),
new HumanMessage({ content: "what's your favorite color", id: "baz" }),
new AIMessage({ content: "silicon blue", id: "blah" }),
];
it("works", () => {
const messages = getMessages();
const filteredMessages = filterMessages(messages, {
includeNames: ["example_user", "example_assistant"],
includeTypes: ["system"],
excludeIds: ["bar"],
});
expect(filteredMessages).toEqual([
new SystemMessage("you're a good assistant."),
new HumanMessage({
content: "what's your name",
id: "foo",
name: "example_user",
}),
]);
});
it("can filter messages based on class types", () => {
const messages = getMessages();
const filteredMessages = filterMessages(messages, {
includeTypes: [HumanMessage, AIMessage],
});
expect(filteredMessages).toHaveLength(4);
expect(filteredMessages).toEqual([
new HumanMessage({
content: "what's your name",
id: "foo",
name: "example_user",
}),
new AIMessage({
content: "steve-o",
id: "bar",
name: "example_assistant",
}),
new HumanMessage({ content: "what's your favorite color", id: "baz" }),
new AIMessage({ content: "silicon blue", id: "blah" }),
]);
});
it("returns a runnable if no messages are passed", () => {
const filteredMessagesRunnable = filterMessages();
expect(filteredMessagesRunnable).toBeDefined();
expect(filteredMessagesRunnable.lc_namespace).toEqual([
"langchain_core",
"runnables",
]);
expect("func" in filteredMessagesRunnable).toBeTruthy();
// `func` is protected, so we need to cast it to any to access it
// eslint-disable-next-line @typescript-eslint/no-explicit-any
expect(typeof (filteredMessagesRunnable as any).func).toBe("function");
});
});
describe("mergeMessageRuns", () => {
const getMessages = () => [
new SystemMessage("you're a good assistant."),
new HumanMessage({ content: "what's your favorite color", id: "foo" }),
new HumanMessage({ content: "wait your favorite food", id: "bar" }),
new AIMessage({
content: "my favorite colo",
tool_calls: [{ name: "blah_tool", args: { x: 2 }, id: "123" }],
id: "baz",
}),
new AIMessage({
content: [{ type: "text", text: "my favorite dish is lasagna" }],
tool_calls: [{ name: "blah_tool", args: { x: -10 }, id: "456" }],
id: "blur",
}),
];
it("works", () => {
const messages = getMessages();
const mergedMessages = mergeMessageRuns(messages);
expect(mergedMessages).toHaveLength(3);
expect(mergedMessages).toEqual([
new SystemMessage("you're a good assistant."),
new HumanMessage({
content: "what's your favorite color\nwait your favorite food",
id: "foo",
}),
new AIMessage({
content: [
{ type: "text", text: "my favorite colo" },
{ type: "text", text: "my favorite dish is lasagna" },
],
tool_calls: [
{ name: "blah_tool", args: { x: 2 }, id: "123", type: "tool_call" },
{ name: "blah_tool", args: { x: -10 }, id: "456", type: "tool_call" },
],
id: "baz",
}),
]);
});
it("returns a runnable if no messages are passed", () => {
const mergedMessages = mergeMessageRuns();
expect(mergedMessages).toBeDefined();
expect(mergedMessages.lc_namespace).toEqual([
"langchain_core",
"runnables",
]);
expect("func" in mergedMessages).toBeTruthy();
// `func` is protected, so we need to cast it to any to access it
// eslint-disable-next-line @typescript-eslint/no-explicit-any
expect(typeof (mergedMessages as any).func).toBe("function");
});
});
describe("trimMessages can trim", () => {
const messagesAndTokenCounterFactory = () => {
const messages = [
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "first",
}),
new AIMessage({
content: [
{ type: "text", text: "This is the FIRST 4 token block." },
{ type: "text", text: "This is the SECOND 4 token block." },
],
id: "second",
}),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
new AIMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "fourth",
}),
];
const dummyTokenCounter = (messages: BaseMessage[]): number => {
// treat each message like it adds 3 default tokens at the beginning
// of the message and at the end of the message. 3 + 4 + 3 = 10 tokens
// per message.
const defaultContentLen = 4;
const defaultMsgPrefixLen = 3;
const defaultMsgSuffixLen = 3;
let count = 0;
for (const msg of messages) {
if (typeof msg.content === "string") {
count +=
defaultMsgPrefixLen + defaultContentLen + defaultMsgSuffixLen;
}
if (Array.isArray(msg.content)) {
count +=
defaultMsgPrefixLen +
msg.content.length * defaultContentLen +
defaultMsgSuffixLen;
}
}
return count;
};
return {
messages,
dummyTokenCounter,
};
};
it("First 30 tokens, not allowing partial messages", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 30,
tokenCounter: dummyTokenCounter,
strategy: "first",
});
expect(trimmedMessages).toHaveLength(2);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "first",
}),
]);
});
it("First 30 tokens, allowing partial messages", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 30,
tokenCounter: dummyTokenCounter,
strategy: "first",
allowPartial: true,
});
expect(trimmedMessages).toHaveLength(3);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "first",
}),
new AIMessage({
content: [{ type: "text", text: "This is the FIRST 4 token block." }],
id: "second",
}),
]);
});
it("First 30 tokens, allowing partial messages, have to end on HumanMessage", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 30,
tokenCounter: dummyTokenCounter,
strategy: "first",
allowPartial: true,
endOn: "human",
});
expect(trimmedMessages).toHaveLength(2);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "first",
}),
]);
});
it("Last 30 tokens, including system message, not allowing partial messages", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 30,
includeSystem: true,
tokenCounter: dummyTokenCounter,
strategy: "last",
});
expect(trimmedMessages).toHaveLength(3);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
new AIMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "fourth",
}),
]);
});
it("Last 40 tokens, including system message, allowing partial messages", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 40,
tokenCounter: dummyTokenCounter,
strategy: "last",
allowPartial: true,
includeSystem: true,
});
expect(trimmedMessages).toHaveLength(4);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new AIMessage({
content: [{ type: "text", text: "This is the FIRST 4 token block." }],
id: "second",
}),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
new AIMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "fourth",
}),
]);
});
it("Last 30 tokens, including system message, allowing partial messages, end on HumanMessage", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 30,
tokenCounter: dummyTokenCounter,
strategy: "last",
endOn: "human",
includeSystem: true,
allowPartial: true,
});
expect(trimmedMessages).toHaveLength(3);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new AIMessage({
content: [{ type: "text", text: "This is the FIRST 4 token block." }],
id: "second",
}),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
]);
});
it("Last 40 tokens, including system message, allowing partial messages, start on HumanMessage", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 40,
tokenCounter: dummyTokenCounter,
strategy: "last",
includeSystem: true,
allowPartial: true,
startOn: "human",
});
expect(trimmedMessages).toHaveLength(3);
expect(trimmedMessages).toEqual([
new SystemMessage(
"This is a 4 token text. The full message is 10 tokens."
),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
new AIMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "fourth",
}),
]);
});
it("can filter (startOn) with message classes", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 40,
tokenCounter: dummyTokenCounter,
startOn: [HumanMessage],
});
expect(trimmedMessages).toHaveLength(2);
expect(trimmedMessages).toEqual([
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
new AIMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "fourth",
}),
]);
});
it("can filter (endOn) with message classes", async () => {
const { messages, dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = await trimMessages(messages, {
maxTokens: 40,
tokenCounter: dummyTokenCounter,
endOn: [HumanMessage],
});
expect(trimmedMessages).toHaveLength(3);
expect(trimmedMessages).toEqual([
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "first",
}),
new AIMessage({
content: [
{ type: "text", text: "This is the FIRST 4 token block." },
{ type: "text", text: "This is the SECOND 4 token block." },
],
id: "second",
}),
new HumanMessage({
content: "This is a 4 token text. The full message is 10 tokens.",
id: "third",
}),
]);
});
it("can return a runnable if empty array is passed", () => {
const { dummyTokenCounter } = messagesAndTokenCounterFactory();
const trimmedMessages = trimMessages({
maxTokens: 40,
tokenCounter: dummyTokenCounter,
});
expect(trimmedMessages).toBeDefined();
expect(trimmedMessages.lc_namespace).toEqual([
"langchain_core",
"runnables",
]);
expect("func" in trimmedMessages).toBeTruthy();
// `func` is protected, so we need to cast it to any to access it
// eslint-disable-next-line @typescript-eslint/no-explicit-any
expect(typeof (trimmedMessages as any).func).toBe("function");
});
});
test("getBufferString can handle complex messages", () => {
const messageArr1 = [new HumanMessage("Hello there!")];
const messageArr2 = [
new AIMessage({
content: [
{
type: "text",
text: "Hello there!",
},
],
}),
];
const messageArr3 = [
new HumanMessage({
content: [
{
type: "image_url",
image_url: {
url: "https://example.com/image.jpg",
},
},
{
type: "image_url",
image_url: "https://example.com/image.jpg",
},
],
}),
];
const bufferString1 = getBufferString(messageArr1);
expect(bufferString1).toBe("Human: Hello there!");
const bufferString2 = getBufferString(messageArr2);
expect(bufferString2).toBe(
`AI: ${JSON.stringify(
[
{
type: "text",
text: "Hello there!",
},
],
null,
2
)}`
);
const bufferString3 = getBufferString(messageArr3);
expect(bufferString3).toBe(
`Human: ${JSON.stringify(
[
{
type: "image_url",
image_url: {
url: "https://example.com/image.jpg",
},
},
{
type: "image_url",
image_url: "https://example.com/image.jpg",
},
],
null,
2
)}`
);
});
describe("chat message conversions", () => {
it("can convert a chat message to a stored message and back", () => {
const originalMessages = [
new ChatMessage("I'm a generic message!", "human"),
new HumanMessage("I'm a human message!"),
];
const storedMessages = mapChatMessagesToStoredMessages(originalMessages);
const convertedBackMessages =
mapStoredMessagesToChatMessages(storedMessages);
expect(convertedBackMessages).toEqual(originalMessages);
});
});
// Default generic "any" values are for backwards compatibility.
// Replace with "string" when we are comfortable with a breaking change.
import { BaseStringPromptTemplate } from "./string.js";
import type {
BasePromptTemplateInput,
TypedPromptInputValues,
} from "./base.js";
import {
checkValidTemplate,
parseTemplate,
renderTemplate,
type TemplateFormat,
} from "./template.js";
import type { SerializedPromptTemplate } from "./serde.js";
import type { InputValues, PartialValues } from "../utils/types/index.js";
import { MessageContent, MessageContentComplex } from "../messages/index.js";
/**
* Inputs to create a {@link PromptTemplate}
* @augments BasePromptTemplateInput
*/
export interface PromptTemplateInput<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
RunInput extends InputValues = any,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
PartialVariableName extends string = any,
Format extends TemplateFormat = TemplateFormat
> extends BasePromptTemplateInput<RunInput, PartialVariableName> {
/**
* The prompt template
*/
template: MessageContent;
/**
* The format of the prompt template. Options are "f-string" and "mustache"
*/
templateFormat?: Format;
/**
* Whether or not to try validating the template on initialization
*
* @defaultValue `true`
*/
validateTemplate?: boolean;
/**
* Additional fields which should be included inside
* the message content array if using a complex message
* content.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
additionalContentFields?: MessageContentComplex;
}
type NonAlphanumeric =
| " "
| "\t"
| "\n"
| "\r"
| '"'
| "'"
| "{"
| "["
| "("
| "`"
| ":"
| ";";
/**
* Recursive type to extract template parameters from a string.
* @template T - The input string.
* @template Result - The resulting array of extracted template parameters.
*/
type ExtractTemplateParamsRecursive<
T extends string,
Result extends string[] = []
> = T extends `${string}{${infer Param}}${infer Rest}`
? Param extends `${NonAlphanumeric}${string}`
? ExtractTemplateParamsRecursive<Rest, Result> // for non-template variables that look like template variables e.g. see https://github.com/langchain-ai/langchainjs/blob/main/langchain/src/chains/query_constructor/prompt.ts
: ExtractTemplateParamsRecursive<Rest, [...Result, Param]>
: Result;
export type ParamsFromFString<T extends string> = {
[Key in
| ExtractTemplateParamsRecursive<T>[number]
// eslint-disable-next-line @typescript-eslint/no-explicit-any
| (string & Record<never, never>)]: any;
};
export type ExtractedFStringParams<
T extends string,
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol
// eslint-disable-next-line @typescript-eslint/ban-types
> = RunInput extends Symbol ? ParamsFromFString<T> : RunInput;
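// Illustrative only: compile-time behavior of the recursive types above.
// These are type-level checks; nothing here runs.
export type DemoParams = ParamsFromFString<"Say {foo} to {bar}">;
// "foo" and "bar" are extracted as required keys (an open string index
// remains for extra values), so this object type-checks:
export const demoParams: DemoParams = { foo: "hello", bar: "world" };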
/**
* Schema to represent a basic prompt for an LLM.
* @augments BasePromptTemplate
* @augments PromptTemplateInput
*
* @example
* ```ts
* import { PromptTemplate } from "langchain/prompts";
*
* const prompt = new PromptTemplate({
* inputVariables: ["foo"],
* template: "Say {foo}",
* });
* ```
*/
export class PromptTemplate<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
RunInput extends InputValues = any,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
PartialVariableName extends string = any
>
extends BaseStringPromptTemplate<RunInput, PartialVariableName>
implements PromptTemplateInput<RunInput, PartialVariableName>
{
static lc_name() {
return "PromptTemplate";
}
template: MessageContent;
templateFormat: TemplateFormat = "f-string";
validateTemplate = true;
/**
* Additional fields which should be included inside
* the message content array if using a complex message
* content.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
additionalContentFields?: MessageContentComplex;
constructor(input: PromptTemplateInput<RunInput, PartialVariableName>) {
super(input);
// If input is mustache and validateTemplate is not defined, set it to false
if (
input.templateFormat === "mustache" &&
input.validateTemplate === undefined
) {
this.validateTemplate = false;
}
Object.assign(this, input);
if (this.validateTemplate) {
if (this.templateFormat === "mustache") {
throw new Error("Mustache templates cannot be validated.");
}
let totalInputVariables: string[] = this.inputVariables;
if (this.partialVariables) {
totalInputVariables = totalInputVariables.concat(
Object.keys(this.partialVariables)
);
}
checkValidTemplate(
this.template,
this.templateFormat,
totalInputVariables
);
}
}
_getPromptType(): "prompt" {
return "prompt";
}
/**
* Formats the prompt template with the provided values.
* @param values The values to be used to format the prompt template.
* @returns A promise that resolves to a string which is the formatted prompt.
*/
async format(values: TypedPromptInputValues<RunInput>): Promise<string> {
const allValues = await this.mergePartialAndUserVariables(values);
return renderTemplate(
this.template as string,
this.templateFormat,
allValues
);
}
/**
* Take examples in list format with prefix and suffix to create a prompt.
*
   * Intended to be used as a way to dynamically create a prompt from examples.
*
* @param examples - List of examples to use in the prompt.
* @param suffix - String to go after the list of examples. Should generally set up the user's input.
* @param inputVariables - A list of variable names the final prompt template will expect
* @param exampleSeparator - The separator to use in between examples
   * @param prefix - String that should go before any examples. Generally includes instructions.
*
* @returns The final prompt template generated.
*/
static fromExamples(
examples: string[],
suffix: string,
inputVariables: string[],
exampleSeparator = "\n\n",
prefix = ""
) {
const template = [prefix, ...examples, suffix].join(exampleSeparator);
return new PromptTemplate({
inputVariables,
template,
});
}
/**
* Load prompt template from a template f-string
*/
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string, "f-string">,
"template" | "inputVariables"
>
): PromptTemplate<ExtractedFStringParams<T, RunInput>>;
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string>,
"template" | "inputVariables"
>
): PromptTemplate<ExtractedFStringParams<T, RunInput>>;
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string, "mustache">,
"template" | "inputVariables"
>
): PromptTemplate<InputValues>;
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string, TemplateFormat>,
"template" | "inputVariables"
>
): PromptTemplate<ExtractedFStringParams<T, RunInput> | InputValues> {
const { templateFormat = "f-string", ...rest } = options ?? {};
const names = new Set<string>();
parseTemplate(template, templateFormat).forEach((node) => {
if (node.type === "variable") {
names.add(node.name);
}
});
return new PromptTemplate({
// Rely on extracted types
// eslint-disable-next-line @typescript-eslint/no-explicit-any
inputVariables: [...names] as any[],
templateFormat,
template,
...rest,
});
}
/**
* Partially applies values to the prompt template.
* @param values The values to be partially applied to the prompt template.
* @returns A new instance of PromptTemplate with the partially applied values.
*/
async partial<NewPartialVariableName extends string>(
values: PartialValues<NewPartialVariableName>
) {
const newInputVariables = this.inputVariables.filter(
(iv) => !(iv in values)
) as Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>[];
const newPartialVariables = {
...(this.partialVariables ?? {}),
...values,
} as PartialValues<PartialVariableName | NewPartialVariableName>;
const promptDict = {
...this,
inputVariables: newInputVariables,
partialVariables: newPartialVariables,
};
return new PromptTemplate<
InputValues<
Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>
>
>(promptDict);
}
serialize(): SerializedPromptTemplate {
if (this.outputParser !== undefined) {
throw new Error(
"Cannot serialize a prompt template with an output parser"
);
}
return {
_type: this._getPromptType(),
input_variables: this.inputVariables,
template: this.template,
template_format: this.templateFormat,
};
}
static async deserialize(
data: SerializedPromptTemplate
): Promise<PromptTemplate> {
if (!data.template) {
throw new Error("Prompt template must have a template");
}
const res = new PromptTemplate({
inputVariables: data.input_variables,
template: data.template,
templateFormat: data.template_format,
});
return res;
}
// TODO(from file)
}
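// Illustrative only: a usage sketch tying the pieces above together.
// `fromTemplate` infers input variables from the template text, and
// `partial` pre-binds a subset of them.
export async function demoPromptTemplate(): Promise<string> {
  const prompt = PromptTemplate.fromTemplate(
    "Tell me a {adjective} joke about {topic}."
  );
  // prompt.inputVariables is ["adjective", "topic"]
  const partialPrompt = await prompt.partial({ adjective: "funny" });
  return partialPrompt.format({ topic: "TypeScript" });
  // "Tell me a funny joke about TypeScript."
}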
export class ChatPromptTemplate<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
RunInput extends InputValues = any,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
PartialVariableName extends string = any
>
extends BaseChatPromptTemplate<RunInput, PartialVariableName>
implements ChatPromptTemplateInput<RunInput, PartialVariableName>
{
static lc_name() {
return "ChatPromptTemplate";
}
get lc_aliases(): Record<string, string> {
return {
promptMessages: "messages",
};
}
promptMessages: Array<BaseMessagePromptTemplate | BaseMessage>;
validateTemplate = true;
templateFormat: TemplateFormat = "f-string";
constructor(input: ChatPromptTemplateInput<RunInput, PartialVariableName>) {
super(input);
// If input is mustache and validateTemplate is not defined, set it to false
if (
input.templateFormat === "mustache" &&
input.validateTemplate === undefined
) {
this.validateTemplate = false;
}
Object.assign(this, input);
if (this.validateTemplate) {
const inputVariablesMessages = new Set<string>();
for (const promptMessage of this.promptMessages) {
// eslint-disable-next-line no-instanceof/no-instanceof
if (promptMessage instanceof BaseMessage) continue;
for (const inputVariable of promptMessage.inputVariables) {
inputVariablesMessages.add(inputVariable);
}
}
const totalInputVariables = this.inputVariables as string[];
const inputVariablesInstance = new Set(
this.partialVariables
? totalInputVariables.concat(Object.keys(this.partialVariables))
: totalInputVariables
);
const difference = new Set(
[...inputVariablesInstance].filter(
(x) => !inputVariablesMessages.has(x)
)
);
if (difference.size > 0) {
throw new Error(
`Input variables \`${[
...difference,
]}\` are not used in any of the prompt messages.`
);
}
const otherDifference = new Set(
[...inputVariablesMessages].filter(
(x) => !inputVariablesInstance.has(x)
)
);
if (otherDifference.size > 0) {
throw new Error(
`Input variables \`${[
...otherDifference,
]}\` are used in prompt messages but not in the prompt template.`
);
}
}
}
_getPromptType(): "chat" {
return "chat";
}
private async _parseImagePrompts(
message: BaseMessage,
inputValues: InputValues<
PartialVariableName | Extract<keyof RunInput, string>
>
): Promise<BaseMessage> {
if (typeof message.content === "string") {
return message;
}
const formattedMessageContent = await Promise.all(
message.content.map(async (item) => {
if (item.type !== "image_url") {
return item;
}
let imageUrl = "";
if (typeof item.image_url === "string") {
imageUrl = item.image_url;
} else {
imageUrl = item.image_url.url;
}
const promptTemplatePlaceholder = PromptTemplate.fromTemplate(
imageUrl,
{
templateFormat: this.templateFormat,
}
);
const formattedUrl = await promptTemplatePlaceholder.format(
inputValues
);
if (typeof item.image_url !== "string" && "url" in item.image_url) {
// eslint-disable-next-line no-param-reassign
item.image_url.url = formattedUrl;
} else {
// eslint-disable-next-line no-param-reassign
item.image_url = formattedUrl;
}
return item;
})
);
// eslint-disable-next-line no-param-reassign
message.content = formattedMessageContent;
return message;
}
async formatMessages(
values: TypedPromptInputValues<RunInput>
): Promise<BaseMessage[]> {
const allValues = await this.mergePartialAndUserVariables(values);
let resultMessages: BaseMessage[] = [];
for (const promptMessage of this.promptMessages) {
// eslint-disable-next-line no-instanceof/no-instanceof
if (promptMessage instanceof BaseMessage) {
resultMessages.push(
await this._parseImagePrompts(promptMessage, allValues)
);
} else {
const inputValues = promptMessage.inputVariables.reduce(
(acc, inputVariable) => {
if (
!(inputVariable in allValues) &&
!(isMessagesPlaceholder(promptMessage) && promptMessage.optional)
) {
const error = addLangChainErrorFields(
new Error(
`Missing value for input variable \`${inputVariable.toString()}\``
),
"INVALID_PROMPT_INPUT"
);
throw error;
}
acc[inputVariable] = allValues[inputVariable];
return acc;
},
{} as InputValues
);
const message = await promptMessage.formatMessages(inputValues);
resultMessages = resultMessages.concat(message);
}
}
return resultMessages;
}
async partial<NewPartialVariableName extends string>(
values: PartialValues<NewPartialVariableName>
) {
    // This is implemented in a way that doesn't require making
    // BaseMessagePromptTemplate aware of .partial()
const newInputVariables = this.inputVariables.filter(
(iv) => !(iv in values)
) as Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>[];
const newPartialVariables = {
...(this.partialVariables ?? {}),
...values,
} as PartialValues<PartialVariableName | NewPartialVariableName>;
const promptDict = {
...this,
inputVariables: newInputVariables,
partialVariables: newPartialVariables,
};
return new ChatPromptTemplate<
InputValues<
Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>
>
>(promptDict);
}
/**
* Load prompt template from a template f-string
*/
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string, "f-string">,
"template" | "inputVariables"
>
): ChatPromptTemplate<ExtractedFStringParams<T, RunInput>>;
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string>,
"template" | "inputVariables"
>
): ChatPromptTemplate<ExtractedFStringParams<T, RunInput>>;
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string, "mustache">,
"template" | "inputVariables"
>
): ChatPromptTemplate<InputValues>;
static fromTemplate<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends InputValues = Symbol,
T extends string = string
>(
template: T,
options?: Omit<
PromptTemplateInput<RunInput, string, TemplateFormat>,
"template" | "inputVariables"
>
): ChatPromptTemplate<ExtractedFStringParams<T, RunInput> | InputValues> {
const prompt = PromptTemplate.fromTemplate(template, options);
const humanTemplate = new HumanMessagePromptTemplate({ prompt });
return this.fromMessages<
// eslint-disable-next-line @typescript-eslint/ban-types
RunInput extends Symbol ? ParamsFromFString<T> : RunInput
>([humanTemplate]);
}
/**
* Create a chat model-specific prompt from individual chat messages
* or message-like tuples.
* @param promptMessages Messages to be passed to the chat model
* @returns A new ChatPromptTemplate
*/
import { BaseStringPromptTemplate } from "./string.js";
import type {
BasePromptTemplateInput,
TypedPromptInputValues,
Example,
} from "./base.js";
import type { BaseExampleSelector } from "../example_selectors/base.js";
import {
type TemplateFormat,
checkValidTemplate,
renderTemplate,
} from "./template.js";
import { PromptTemplate } from "./prompt.js";
import type { SerializedFewShotTemplate } from "./serde.js";
import type { InputValues, PartialValues } from "../utils/types/index.js";
import type { BaseMessage } from "../messages/index.js";
import {
BaseChatPromptTemplate,
type BaseMessagePromptTemplate,
} from "./chat.js";
export interface FewShotPromptTemplateInput
extends BasePromptTemplateInput<InputValues> {
/**
* Examples to format into the prompt. Exactly one of this or
* {@link exampleSelector} must be
* provided.
*/
examples?: Example[];
/**
   * A {@link BaseExampleSelector} used to select the examples to format
   * into the prompt. Exactly one of this or {@link examples} must be
   * provided.
*/
exampleSelector?: BaseExampleSelector;
/**
   * A {@link PromptTemplate} used to format a single example.
*/
examplePrompt: PromptTemplate;
/**
* String separator used to join the prefix, the examples, and suffix.
*/
exampleSeparator?: string;
/**
* A prompt template string to put before the examples.
*
* @defaultValue `""`
*/
prefix?: string;
/**
* A prompt template string to put after the examples.
*/
suffix?: string;
/**
* The format of the prompt template. Options are: 'f-string'
*/
templateFormat?: TemplateFormat;
/**
* Whether or not to try validating the template on initialization.
*/
validateTemplate?: boolean;
}
/**
* Prompt template that contains few-shot examples.
* @augments BasePromptTemplate
* @augments FewShotPromptTemplateInput
* @example
* ```typescript
* const examplePrompt = PromptTemplate.fromTemplate(
* "Input: {input}\nOutput: {output}",
* );
*
* const exampleSelector = await SemanticSimilarityExampleSelector.fromExamples(
* [
* { input: "happy", output: "sad" },
* { input: "tall", output: "short" },
* { input: "energetic", output: "lethargic" },
* { input: "sunny", output: "gloomy" },
* { input: "windy", output: "calm" },
* ],
* new OpenAIEmbeddings(),
* HNSWLib,
* { k: 1 },
* );
*
* const dynamicPrompt = new FewShotPromptTemplate({
* exampleSelector,
* examplePrompt,
* prefix: "Give the antonym of every input",
* suffix: "Input: {adjective}\nOutput:",
* inputVariables: ["adjective"],
* });
*
* // Format the dynamic prompt with the input 'rainy'
* console.log(await dynamicPrompt.format({ adjective: "rainy" }));
*
* ```
*/
export class FewShotPromptTemplate
extends BaseStringPromptTemplate
implements FewShotPromptTemplateInput
{
lc_serializable = false;
examples?: InputValues[];
exampleSelector?: BaseExampleSelector | undefined;
examplePrompt: PromptTemplate;
suffix = "";
exampleSeparator = "\n\n";
prefix = "";
templateFormat: TemplateFormat = "f-string";
validateTemplate = true;
constructor(input: FewShotPromptTemplateInput) {
super(input);
Object.assign(this, input);
if (this.examples !== undefined && this.exampleSelector !== undefined) {
throw new Error(
"Only one of 'examples' and 'example_selector' should be provided"
);
}
if (this.examples === undefined && this.exampleSelector === undefined) {
throw new Error(
"One of 'examples' and 'example_selector' should be provided"
);
}
if (this.validateTemplate) {
let totalInputVariables: string[] = this.inputVariables;
if (this.partialVariables) {
totalInputVariables = totalInputVariables.concat(
Object.keys(this.partialVariables)
);
}
checkValidTemplate(
this.prefix + this.suffix,
this.templateFormat,
totalInputVariables
);
}
}
_getPromptType(): "few_shot" {
return "few_shot";
}
static lc_name() {
return "FewShotPromptTemplate";
}
private async getExamples(
inputVariables: InputValues
): Promise<InputValues[]> {
if (this.examples !== undefined) {
return this.examples;
}
if (this.exampleSelector !== undefined) {
return this.exampleSelector.selectExamples(inputVariables);
}
throw new Error(
"One of 'examples' and 'example_selector' should be provided"
);
}
async partial<NewPartialVariableName extends string>(
values: PartialValues<NewPartialVariableName>
) {
const newInputVariables = this.inputVariables.filter(
(iv) => !(iv in values)
);
const newPartialVariables = {
...(this.partialVariables ?? {}),
...values,
};
const promptDict = {
...this,
inputVariables: newInputVariables,
partialVariables: newPartialVariables,
};
return new FewShotPromptTemplate(promptDict);
}
/**
* Formats the prompt with the given values.
* @param values The values to format the prompt with.
* @returns A promise that resolves to a string representing the formatted prompt.
*/
async format(values: InputValues): Promise<string> {
const allValues = await this.mergePartialAndUserVariables(values);
const examples = await this.getExamples(allValues);
const exampleStrings = await Promise.all(
examples.map((example) => this.examplePrompt.format(example))
);
const template = [this.prefix, ...exampleStrings, this.suffix].join(
this.exampleSeparator
);
return renderTemplate(template, this.templateFormat, allValues);
}
serialize(): SerializedFewShotTemplate {
if (this.exampleSelector || !this.examples) {
throw new Error(
"Serializing an example selector is not currently supported"
);
}
if (this.outputParser !== undefined) {
throw new Error(
"Serializing an output parser is not currently supported"
);
}
return {
_type: this._getPromptType(),
input_variables: this.inputVariables,
example_prompt: this.examplePrompt.serialize(),
example_separator: this.exampleSeparator,
suffix: this.suffix,
prefix: this.prefix,
template_format: this.templateFormat,
examples: this.examples,
};
}
static async deserialize(
data: SerializedFewShotTemplate
): Promise<FewShotPromptTemplate> {
const { example_prompt } = data;
if (!example_prompt) {
throw new Error("Missing example prompt");
}
const examplePrompt = await PromptTemplate.deserialize(example_prompt);
let examples: Example[];
if (Array.isArray(data.examples)) {
examples = data.examples;
} else {
throw new Error(
"Invalid examples format. Only list or string are supported."
);
}
return new FewShotPromptTemplate({
inputVariables: data.input_variables,
examplePrompt,
examples,
exampleSeparator: data.example_separator,
prefix: data.prefix,
suffix: data.suffix,
templateFormat: data.template_format,
});
}
}
export interface FewShotChatMessagePromptTemplateInput
extends BasePromptTemplateInput<InputValues> {
/**
* Examples to format into the prompt. Exactly one of this or
* {@link exampleSelector} must be
* provided.
*/
examples?: Example[];
/**
   * A {@link BaseMessagePromptTemplate} or {@link BaseChatPromptTemplate} used to format a single example.
*/
examplePrompt: BaseMessagePromptTemplate | BaseChatPromptTemplate;
/**
* String separator used to join the prefix, the examples, and suffix.
*
* @defaultValue `"\n\n"`
*/
exampleSeparator?: string;
/**
   * A {@link BaseExampleSelector} used to select the examples to format
   * into the prompt. Exactly one of this or {@link examples} must be
   * provided.
*/
exampleSelector?: BaseExampleSelector | undefined;
/**
* A prompt template string to put before the examples.
*
* @defaultValue `""`
*/
prefix?: string;
/**
* A prompt template string to put after the examples.
*
* @defaultValue `""`
*/
suffix?: string;
/**
* The format of the prompt template. Options are: 'f-string'
*
* @defaultValue `f-string`
*/
templateFormat?: TemplateFormat;
/**
* Whether or not to try validating the template on initialization.
*
* @defaultValue `true`
*/
validateTemplate?: boolean;
}
import { expect, test } from "@jest/globals";
import { PromptTemplate } from "../prompt.js";
import { Document } from "../../documents/document.js";
test("Test using partial", async () => {
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: ["foo"],
partialVariables: { bar: "baz" },
});
expect(await prompt.format({ foo: "foo" })).toBe("foobaz");
});
test("Test using partial with an extra variable", async () => {
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: ["foo"],
partialVariables: { bar: "baz" },
});
expect(await prompt.format({ foo: "foo", unused: "nada" })).toBe("foobaz");
});
test("Test fromTemplate", async () => {
const prompt = PromptTemplate.fromTemplate("{foo}{bar}");
expect(
(await prompt.invoke({ foo: "foo", bar: "baz", unused: "eee" })).value
).toBe("foobaz");
});
test("Test fromTemplate with a non-string value", async () => {
const prompt = PromptTemplate.fromTemplate("{foo}{bar}");
expect(
(
await prompt.invoke({
foo: ["barbar"],
bar: [new Document({ pageContent: "bar" })],
})
).value
).toBe(`["barbar"][{"pageContent":"bar","metadata":{}}]`);
});
test("Test fromTemplate with escaped strings", async () => {
const prompt = PromptTemplate.fromTemplate("{{foo}}{{bar}}");
expect(await prompt.format({ unused: "eee" })).toBe("{foo}{bar}");
});
test("Test fromTemplate with type parameter", async () => {
const prompt = PromptTemplate.fromTemplate<{ foo: string }>("test");
// @ts-expect-error TS compiler should flag
expect(await prompt.format({ unused: "eee" })).toBe("test");
});
test("Test fromTemplate with missing variable should raise compiler error", async () => {
const prompt = PromptTemplate.fromTemplate("{foo}");
await expect(async () => {
// @ts-expect-error TS compiler should flag missing variable
await prompt.format({ unused: "eee" });
}).rejects.toThrow();
await expect(async () => {
// @ts-expect-error TS compiler should flag missing variable
await prompt.invoke({ unused: "eee" });
}).rejects.toThrow();
});
test("Test fromTemplate with extra variable should work", async () => {
const prompt = PromptTemplate.fromTemplate("{foo}");
expect(await prompt.format({ foo: "test", unused: "eee" })).toBe("test");
expect((await prompt.invoke({ foo: "test", unused: "eee" })).value).toBe(
"test"
);
});
test("Test using full partial", async () => {
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: [],
partialVariables: { bar: "baz", foo: "boo" },
});
expect(await prompt.format({})).toBe("boobaz");
});
test("Test partial", async () => {
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: ["foo", "bar"],
});
expect(prompt.inputVariables).toEqual(["foo", "bar"]);
const partialPrompt = await prompt.partial({ foo: "foo" });
// original prompt is not modified
expect(prompt.inputVariables).toEqual(["foo", "bar"]);
// partial prompt has only remaining variables
expect(partialPrompt.inputVariables).toEqual(["bar"]);
expect(await partialPrompt.format({ bar: "baz" })).toBe("foobaz");
});
test("Test partial with function", async () => {
const prompt = new PromptTemplate({
template: "{foo}{bar}",
inputVariables: ["foo", "bar"],
});
const partialPrompt = await prompt.partial({
foo: () => Promise.resolve("boo"),
});
expect(await partialPrompt.format({ bar: "baz" })).toBe("boobaz");
});
/* eslint-disable @typescript-eslint/no-explicit-any */
import { expect, test } from "@jest/globals";
import {
AIMessagePromptTemplate,
ChatPromptTemplate,
ChatMessagePromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
MessagesPlaceholder,
} from "../chat.js";
import { PromptTemplate } from "../prompt.js";
import {
SystemMessage,
HumanMessage,
AIMessage,
ChatMessage,
FunctionMessage,
} from "../../messages/index.js";
import { Document } from "../../documents/document.js";
function createChatPromptTemplate() {
const systemPrompt = new PromptTemplate({
template: "Here's some context: {context}",
inputVariables: ["context"],
});
const userPrompt = new PromptTemplate({
template: "Hello {foo}, I'm {bar}. Thanks for the {context}",
inputVariables: ["foo", "bar", "context"],
});
const aiPrompt = new PromptTemplate({
template: "I'm an AI. I'm {foo}. I'm {bar}.",
inputVariables: ["foo", "bar"],
});
const genericPrompt = new PromptTemplate({
template: "I'm a generic message. I'm {foo}. I'm {bar}.",
inputVariables: ["foo", "bar"],
});
// return new ChatPromptTemplate({
// promptMessages: [
// new SystemMessagePromptTemplate(systemPrompt),
// new HumanMessagePromptTemplate(userPrompt),
// new AIMessagePromptTemplate({ prompt: aiPrompt }),
// new ChatMessagePromptTemplate(genericPrompt, "test"),
// ],
// inputVariables: ["context", "foo", "bar"],
// });
return ChatPromptTemplate.fromMessages<{
foo: string;
bar: string;
context: string;
}>([
new SystemMessagePromptTemplate(systemPrompt),
new HumanMessagePromptTemplate(userPrompt),
new AIMessagePromptTemplate({ prompt: aiPrompt }),
new ChatMessagePromptTemplate(genericPrompt, "test"),
]);
}
test("Test format", async () => {
const chatPrompt = createChatPromptTemplate();
const messages = await chatPrompt.formatPromptValue({
context: "This is a context",
foo: "Foo",
bar: "Bar",
unused: "extra",
});
expect(messages.toChatMessages()).toEqual([
new SystemMessage("Here's some context: This is a context"),
new HumanMessage("Hello Foo, I'm Bar. Thanks for the This is a context"),
new AIMessage("I'm an AI. I'm Foo. I'm Bar."),
new ChatMessage("I'm a generic message. I'm Foo. I'm Bar.", "test"),
]);
});
test("Test format with invalid input values", async () => {
const chatPrompt = createChatPromptTemplate();
let error: any | undefined;
try {
// @ts-expect-error TS compiler should flag missing input variables
await chatPrompt.formatPromptValue({
context: "This is a context",
foo: "Foo",
});
} catch (e) {
error = e;
}
expect(error?.message).toContain("Missing value for input variable `bar`");
expect(error?.lc_error_code).toEqual("INVALID_PROMPT_INPUT");
});
test("Test format with invalid input variables", async () => {
const systemPrompt = new PromptTemplate({
template: "Here's some context: {context}",
inputVariables: ["context"],
});
const userPrompt = new PromptTemplate({
template: "Hello {foo}, I'm {bar}",
inputVariables: ["foo", "bar"],
});
expect(
() =>
new ChatPromptTemplate({
promptMessages: [
new SystemMessagePromptTemplate(systemPrompt),
new HumanMessagePromptTemplate(userPrompt),
],
inputVariables: ["context", "foo", "bar", "baz"],
})
).toThrow(
"Input variables `baz` are not used in any of the prompt messages."
);
expect(
() =>
new ChatPromptTemplate({
promptMessages: [
new SystemMessagePromptTemplate(systemPrompt),
new HumanMessagePromptTemplate(userPrompt),
],
inputVariables: ["context", "foo"],
})
).toThrow(
"Input variables `bar` are used in prompt messages but not in the prompt template."
);
});
test("Test fromTemplate", async () => {
const chatPrompt = ChatPromptTemplate.fromTemplate("Hello {foo}, I'm {bar}");
expect(chatPrompt.inputVariables).toEqual(["foo", "bar"]);
const messages = await chatPrompt.formatPromptValue({
foo: "Foo",
bar: "Bar",
});
expect(messages.toChatMessages()).toEqual([
new HumanMessage("Hello Foo, I'm Bar"),
]);
});
test("Test fromTemplate", async () => {
const chatPrompt = ChatPromptTemplate.fromTemplate("Hello {foo}, I'm {bar}");
expect(chatPrompt.inputVariables).toEqual(["foo", "bar"]);
expect(
(
await chatPrompt.invoke({
foo: ["barbar"],
bar: [new Document({ pageContent: "bar" })],
})
).toChatMessages()
).toEqual([
new HumanMessage(
`Hello ["barbar"], I'm [{"pageContent":"bar","metadata":{}}]`
),
]);
});
test("Test fromMessages", async () => {
const systemPrompt = new PromptTemplate({
template: "Here's some context: {context}",
inputVariables: ["context"],
});
const userPrompt = new PromptTemplate({
template: "Hello {foo}, I'm {bar}",
inputVariables: ["foo", "bar"],
});
// TODO: Fix autocomplete for the fromMessages method
const chatPrompt = ChatPromptTemplate.fromMessages([
new SystemMessagePromptTemplate(systemPrompt),
new HumanMessagePromptTemplate(userPrompt),
]);
expect(chatPrompt.inputVariables).toEqual(["context", "foo", "bar"]);
const messages = await chatPrompt.formatPromptValue({
context: "This is a context",
foo: "Foo",
bar: "Bar",
});
expect(messages.toChatMessages()).toEqual([
new SystemMessage("Here's some context: This is a context"),
new HumanMessage("Hello Foo, I'm Bar"),
]);
});
test("Test fromMessages with non-string inputs", async () => {
const systemPrompt = new PromptTemplate({
template: "Here's some context: {context}",
inputVariables: ["context"],
});
const userPrompt = new PromptTemplate({
template: "Hello {foo}, I'm {bar}",
inputVariables: ["foo", "bar"],
});
// TODO: Fix autocomplete for the fromMessages method
const chatPrompt = ChatPromptTemplate.fromMessages([
new SystemMessagePromptTemplate(systemPrompt),
new HumanMessagePromptTemplate(userPrompt),
]);
expect(chatPrompt.inputVariables).toEqual(["context", "foo", "bar"]);
const messages = await chatPrompt.formatPromptValue({
context: [new Document({ pageContent: "bar" })],
foo: "Foo",
bar: "Bar",
});
expect(messages.toChatMessages()).toEqual([
new SystemMessage(
`Here's some context: [{"pageContent":"bar","metadata":{}}]`
),
new HumanMessage("Hello Foo, I'm Bar"),
]);
});
test("Test fromMessages with a variety of ways to declare prompt messages", async () => {
const systemPrompt = new PromptTemplate({
template: "Here's some context: {context}",
inputVariables: ["context"],
});
// TODO: Fix autocomplete for the fromMessages method
const chatPrompt = ChatPromptTemplate.fromMessages([
new SystemMessagePromptTemplate(systemPrompt),
"Hello {foo}, I'm {bar}",
["assistant", "Nice to meet you, {bar}!"],
["human", "Thanks {foo}!!"],
]);
const messages = await chatPrompt.formatPromptValue({
context: "This is a context",
foo: "Foo",
bar: "Bar",
});
expect(messages.toChatMessages()).toEqual([
new SystemMessage("Here's some context: This is a context"),
new HumanMessage("Hello Foo, I'm Bar"),
new AIMessage("Nice to meet you, Bar!"),
new HumanMessage("Thanks Foo!!"),
]);
});
import { Document } from "../documents/document.js";
import { BaseDocumentTransformer } from "../documents/transformers.js";
/**
* Interface that defines the methods for loading and splitting documents.
*/
export interface DocumentLoader {
load(): Promise<Document[]>;
loadAndSplit(textSplitter?: BaseDocumentTransformer): Promise<Document[]>;
}
/**
* Abstract class that provides a default implementation for the
* loadAndSplit() method from the DocumentLoader interface. The load()
* method is left abstract and needs to be implemented by subclasses.
*/
export abstract class BaseDocumentLoader implements DocumentLoader {
/**
* Loads the documents.
* @returns A Promise that resolves with an array of Document instances.
*/
abstract load(): Promise<Document[]>;
/**
* @deprecated Use `this.load()` and `splitter.splitDocuments()` individually.
* Loads the documents and splits them using a specified text splitter.
   * @param splitter The text splitter used to split the loaded documents. Required; an error is thrown if it is omitted.
* @returns A Promise that resolves with an array of Document instances, each split according to the provided TextSplitter.
*/
async loadAndSplit(splitter?: BaseDocumentTransformer): Promise<Document[]> {
if (splitter === undefined) {
throw new Error("You must pass a text splitter to use this method.");
}
const docs = await this.load();
return splitter.invoke(docs);
}
}
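// Illustrative only: a minimal concrete loader, not part of this module.
// Subclasses only need to implement `load()`; `loadAndSplit()` is inherited.
export class InMemoryTextLoader extends BaseDocumentLoader {
  constructor(private texts: string[]) {
    super();
  }

  async load(): Promise<Document[]> {
    return this.texts.map(
      (text, index) => new Document({ pageContent: text, metadata: { index } })
    );
  }
}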
export interface DocumentInput<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
Metadata extends Record<string, any> = Record<string, any>
> {
pageContent: string;
metadata?: Metadata;
/**
* An optional identifier for the document.
*
* Ideally this should be unique across the document collection and formatted
* as a UUID, but this will not be enforced.
*/
id?: string;
}
export interface DocumentInterface<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
Metadata extends Record<string, any> = Record<string, any>
> {
pageContent: string;
metadata: Metadata;
/**
* An optional identifier for the document.
*
* Ideally this should be unique across the document collection and formatted
* as a UUID, but this will not be enforced.
*/
id?: string;
}
/**
 * Class for interacting with a document.
*/
export class Document<
// eslint-disable-next-line @typescript-eslint/no-explicit-any
Metadata extends Record<string, any> = Record<string, any>
> implements DocumentInput, DocumentInterface
{
pageContent: string;
metadata: Metadata;
// The ID field is optional at the moment.
// It will likely become required in a future major release after
// it has been adopted by enough vectorstore implementations.
/**
* An optional identifier for the document.
*
* Ideally this should be unique across the document collection and formatted
* as a UUID, but this will not be enforced.
*/
id?: string;
constructor(fields: DocumentInput<Metadata>) {
this.pageContent =
fields.pageContent !== undefined ? fields.pageContent.toString() : "";
this.metadata = fields.metadata ?? ({} as Metadata);
this.id = fields.id;
}
}
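// Illustrative only: constructing a typed document. The generic parameter
// types the metadata record, and `id` stays optional.
export const demoDocument = new Document<{ source: string }>({
  pageContent: "Example page content.",
  metadata: { source: "demo://example" }, // hypothetical source URI
  id: "00000000-0000-0000-0000-000000000000",
});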
import { DocumentInterface } from "@langchain/core/documents";
import { Embeddings } from "@langchain/core/embeddings";
import {
cosineSimilarity,
euclideanDistance,
innerProduct,
} from "@langchain/core/utils/math";
import {
VectorStore,
VectorStoreRetriever,
VectorStoreRetrieverInput,
} from "@langchain/core/vectorstores";
/**
* Type for options when adding a document to the VectorStore.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type AddDocumentOptions = Record<string, any>;
export interface MatryoshkaRetrieverFields {
/**
* The number of documents to retrieve from the small store.
* @default 50
*/
smallK?: number;
/**
* The number of documents to retrieve from the large store.
* @default 8
*/
largeK?: number;
/**
* The metadata key to store the larger embeddings.
* @default "lc_large_embedding"
*/
largeEmbeddingKey?: string;
/**
* The embedding model to use when generating the large
* embeddings.
*/
largeEmbeddingModel: Embeddings;
/**
* The type of search to perform using the large embeddings.
* @default "cosine"
*/
searchType?: "cosine" | "innerProduct" | "euclidean";
}
/**
 * A retriever that uses two sets of embeddings to perform adaptive retrieval. Based
 * on the "Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval"
* blog post {@link https://supabase.com/blog/matryoshka-embeddings}.
*
*
* This class performs "Adaptive Retrieval" for searching text embeddings efficiently using the
* Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query
* embedding in two steps:
*
* First-pass: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,
* but less accurate search.
*
* Second-pass: Re-ranks the top results from the first pass using the full, high-dimensional
* embedding for higher accuracy.
*
*
* This code implements MRL embeddings for efficient vector search by combining faster,
* lower-dimensional initial search with accurate, high-dimensional re-ranking.
*/
export class MatryoshkaRetriever<
Store extends VectorStore = VectorStore
> extends VectorStoreRetriever<Store> {
smallK = 50;
largeK = 8;
largeEmbeddingKey = "lc_large_embedding";
largeEmbeddingModel: Embeddings;
searchType: "cosine" | "innerProduct" | "euclidean" = "cosine";
constructor(
fields: MatryoshkaRetrieverFields & VectorStoreRetrieverInput<Store>
) {
super(fields);
this.smallK = fields.smallK ?? this.smallK;
this.largeK = fields.largeK ?? this.largeK;
this.largeEmbeddingKey = fields.largeEmbeddingKey ?? this.largeEmbeddingKey;
this.largeEmbeddingModel = fields.largeEmbeddingModel;
this.searchType = fields.searchType ?? this.searchType;
}
/**
* Ranks documents based on their similarity to a query embedding using larger embeddings.
*
* This method takes a query embedding and a list of documents (smallResults) as input. Each document
* in the smallResults array has previously been associated with a large embedding stored in its metadata.
* Depending on the `searchType` (cosine, innerProduct, or euclidean), it calculates the similarity scores
* between the query embedding and each document's large embedding. It then ranks the documents based on
* these similarity scores, from the most similar to the least similar.
*
 * The method returns an array of the top `largeK` documents, where `largeK` is a class
 * property defining the number of documents to return. This subset is determined by
 * sorting the full list of documents by similarity score and selecting the top `largeK`
 * entries.
*
* @param {number[]} embeddedQuery The embedding of the query, represented as an array of numbers.
* @param {DocumentInterface[]} smallResults An array of documents, each with metadata that includes a large embedding for similarity comparison.
 * @returns {DocumentInterface[]} An array of the top `largeK` ranked documents based on their similarity to the query embedding.
*/
private _rankByLargeEmbeddings(
embeddedQuery: number[],
smallResults: DocumentInterface[]
): DocumentInterface[] {
const largeEmbeddings: Array<number[]> = smallResults.map((doc) =>
JSON.parse(doc.metadata[this.largeEmbeddingKey])
);
let func: () => Array<number[]>;
switch (this.searchType) {
case "cosine":
func = () => cosineSimilarity([embeddedQuery], largeEmbeddings);
break;
case "innerProduct":
func = () => innerProduct([embeddedQuery], largeEmbeddings);
break;
case "euclidean":
func = () => euclideanDistance([embeddedQuery], largeEmbeddings);
break;
default:
throw new Error(`Unknown search type: ${this.searchType}`);
}
// Calculate the similarity scores between the query embedding and the large embeddings
const [similarityScores] = func();
// Create an array of indices from 0 to N-1, where N is the number of documents
let indices = Array.from(
{ length: smallResults.length },
(_, index) => index
);
    indices = indices
      .map((v, i) => [similarityScores[i], v])
      // Euclidean distance is a "smaller is closer" metric, so sort ascending;
      // cosine similarity and inner product are "larger is closer", so sort descending.
      .sort(([a], [b]) => (this.searchType === "euclidean" ? a - b : b - a))
      .slice(0, this.largeK)
      .map(([, i]) => i);
return indices.map((i) => smallResults[i]);
}
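  // Illustrative trace of the ranking above (hypothetical numbers, cosine search):
  // with similarityScores = [0.2, 0.9, 0.5] and largeK = 2, the score/index pairs
  // [[0.2, 0], [0.9, 1], [0.5, 2]] sort to [[0.9, 1], [0.5, 2]], so the method
  // returns [smallResults[1], smallResults[2]].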
async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> {
const [embeddedQuery, smallResults] = await Promise.all([
this.largeEmbeddingModel.embedQuery(query),
this.vectorStore.similaritySearch(query, this.smallK, this.filter),
]);
return this._rankByLargeEmbeddings(embeddedQuery, smallResults);
}
/**
* Override the default `addDocuments` method to embed the documents twice,
* once using the larger embeddings model, and then again using the default
* embedding model linked to the vector store.
*
* @param {DocumentInterface[]} documents - An array of documents to add to the vector store.
* @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.
* @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.
*/
override addDocuments = async (
documents: DocumentInterface[],
options?: AddDocumentOptions
): Promise<string[] | void> => {
    // Ensure document metadata does not already contain the large embedding key
    if (documents.some((doc) => this.largeEmbeddingKey in doc.metadata)) {
      throw new Error(
        `Documents must not already contain the large embedding key "${this.largeEmbeddingKey}" in their metadata.`
      );
}
const allDocPageContent = documents.map((doc) => doc.pageContent);
const allDocLargeEmbeddings = await this.largeEmbeddingModel.embedDocuments(
allDocPageContent
);
const newDocuments: Array<DocumentInterface> = documents.map(
(doc, idx) => ({
...doc,
metadata: {
...doc.metadata,
[this.largeEmbeddingKey]: JSON.stringify(allDocLargeEmbeddings[idx]),
},
})
);
return this.vectorStore.addDocuments(newDocuments, options);
};
}
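// A usage sketch (assumptions: `OpenAIEmbeddings` from "@langchain/openai" and
// `MemoryVectorStore` from "langchain/vectorstores/memory" are available; the
// model names and dimensions are illustrative, and `docs` is an array of
// `Document` instances defined elsewhere):
//
//   import { OpenAIEmbeddings } from "@langchain/openai";
//   import { MemoryVectorStore } from "langchain/vectorstores/memory";
//
//   const smallEmbeddings = new OpenAIEmbeddings({
//     model: "text-embedding-3-small",
//     dimensions: 512, // lower-dimensional vectors for the fast first pass
//   });
//   const largeEmbeddings = new OpenAIEmbeddings({
//     model: "text-embedding-3-large",
//     dimensions: 3072, // full-size vectors for the accurate second pass
//   });
//   const retriever = new MatryoshkaRetriever({
//     vectorStore: new MemoryVectorStore(smallEmbeddings),
//     largeEmbeddingModel: largeEmbeddings,
//     largeK: 5,
//   });
//
//   await retriever.addDocuments(docs); // embeds twice; large vectors land in metadata
//   const results = await retriever.invoke("What are Matryoshka embeddings?");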